diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt
new file mode 100644
index 0000000..3a93fce
--- /dev/null
+++ b/CONTRIBUTORS.txt
@@ -0,0 +1,5 @@
+ - Jeremy Metz
+     Project coordinator
+
+ - Ashley Smith
+     Core developer
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..9297818
--- /dev/null
+++ b/README.md
@@ -0,0 +1,56 @@
+# Introduction
+
+This project provides an automated framework to detect, track, and analyse bacteria in a "Mother Machine" microfluidic setup.
+
+# Prerequisites
+To run momanalysis you must have Python installed on your computer.
+For Windows we recommend either Anaconda or WinPython.
+For Mac, Unix or Linux we recommend Anaconda.
+
+WinPython: https://winpython.github.io
+Anaconda: https://conda.io/docs/user-guide/install/index.html
+
+# Installing momanalysis
+
+Installing momanalysis is simple.
+
+Just open Terminal, Anaconda prompt or WinPython command prompt and type the following:
+
+> pip install -U "momanalysis link here!!!"
+
+# Starting momanalysis
+
+Once the package is installed, you can start the user interface by typing:
+
+> momanalysis
+
+![Alt text](/docs/momanalysis_gui.png "User Interface")
+
+# Running momanalysis
+
+Running momanalysis is easy. You can type the file/folder path manually, select it using the button provided, or simply drag and drop it onto the interface.
+The input options and the parameters they require on the user interface are listed below.
+N.B. If running in batch mode on a folder of images (which can include fluorescence), see the next section.
+
+No fluorescence
+* Single brightfield or phase contrast image - add the file and hit "start analysis"
+* Stack of brightfield or phase contrast images - add the file and hit "start analysis"
+
+Fluorescence
+* Alternating stack of brightfield/phase contrast images and their matching fluorescent images (e.g. BF/Fluo/BF/Fluo) - add the file, select "combined fluorescence" and hit "start analysis"
+* Corresponding fluorescent image(s) in a separate file (stacks if multiple frames) - add the files, select "separate fluorescence" and hit "start analysis"
+
+# Folder (Batch mode)
+momanalysis can also run in batch mode on a data set containing multiple areas. To do this, the files within the folder must be named in the following format (an example listing is given at the end of this README):
+"a_b_c.tif" where:
+* a = an identifier for the area in which the image was taken, e.g. "01" for all images in the first area of the mother machine, "02" for the second, etc.
+* b = a time stamp - this allows momanalysis to order images from the same area chronologically
+* c = any further identifiers of your choice
+
+Once you have a suitable folder, simply add it to the interface as described above and select "batch run".
+If the folder contains only brightfield/phase images you can hit "start analysis"; if it also includes fluorescent images (see note below), select "combined fluorescence" before starting the analysis.
+
+Note: When including fluorescent images in a folder, the timestamps should alternate between a BF/phase image and its matching fluorescent image (like the alternating stack above).
+
+# Additional options
+You also have the option of specifying where the output of the analysis should be saved.
+If this is left undefined, the output is saved in the same folder as the input.
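+# Example: batch mode folder layout
+
+For illustration only (these file names are hypothetical and not shipped with momanalysis), a batch folder covering two areas, with alternating brightfield and fluorescence timepoints, might contain:
+
+* 01_0001_BF.tif
+* 01_0002_Fluo.tif
+* 01_0003_BF.tif
+* 01_0004_Fluo.tif
+* 02_0001_BF.tif
+* 02_0002_Fluo.tif
+* 02_0003_BF.tif
+* 02_0004_Fluo.tif
+
+momanalysis groups these files by the leading area identifier ("01", "02") and orders each area's images by the timestamp field before analysis.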
diff --git a/docs/momanalysis_gui.png b/docs/momanalysis_gui.png
new file mode 100644
index 0000000..17c0767
Binary files /dev/null and b/docs/momanalysis_gui.png differ
diff --git a/momanalysis/__init__.py b/momanalysis/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/momanalysis/__main__.py b/momanalysis/__main__.py
new file mode 100644
index 0000000..5dc7092
--- /dev/null
+++ b/momanalysis/__main__.py
@@ -0,0 +1,90 @@
+#
+# FILE : __main__.py
+# CREATED : 22/09/16 12:48:59
+# AUTHOR : J. Metz
+# DESCRIPTION : CLI module interface - invoked using python -m momanalysis
+#
+
+import argparse
+from momanalysis.main import run_analysis_pipeline
+from momanalysis.main import batch
+
+from momanalysis.utility import logger
+
+logger.setLevel("DEBUG")
+
+# Parse command line arguments
+
+parser = argparse.ArgumentParser(
+    description="MOther Machine ANALYSIS module")
+parser.add_argument("filename", nargs="+", help="Data file(s) to load and analyse")
+parser.add_argument("-t", default=None, type=int, help="Frame to run to for multi-frame stacks")
+parser.add_argument("--invert", action="store_true", help="Invert brightfield image")
+parser.add_argument("--brightchannel", action="store_true", help="Detect channel as bright line instead of dark line")
+parser.add_argument("--show", action="store_true", help="Show the plots")
+parser.add_argument("--limitmem", action="store_true", help="Limit memory to 1/3 of system RAM")
+parser.add_argument("--debug", action="store_true", help="Debug")
+parser.add_argument("--loader", default="default", choices=['default', 'tifffile'], help="Switch IO method")
+parser.add_argument("--channel", type=int, default=None, help="Select channel for multi-channel images")
+parser.add_argument("--tdim", type=int, default=0, help="Identify time dimension if not 0")
+parser.add_argument("--output", default=None, help="Name of output file; if not specified it will be the same as the input")
+parser.add_argument("--fluo", default=None, help="Stack of matching fluorescent images")
+parser.add_argument("-f", action="store_true", help="Stack of images contains alternating matching fluorescent images")
+parser.add_argument("-ba", action="store_true", help="Input is a folder containing multiple image areas to run at the same time")
+args = parser.parse_args()
+
+
+if args.limitmem:
+    #------------------------------
+    # Memory management
+    #------------------------------
+    import resource
+    import os
+    gb = 1024.**3
+    mem_bytes = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
+    mem_gib = mem_bytes/gb  # e.g.
3.74 + logger.debug("MANAGING MEMORY USAGE...") + logger.debug("SYSTEM MEMORY: %0.2f GB" % mem_gib) + logger.debug("LIMITING PROGRAM TO %0.2f GB" % (mem_gib/3)) + lim = mem_bytes//3 + rsrc = resource.RLIMIT_AS + soft, hard = resource.getrlimit(rsrc) + resource.setrlimit(rsrc, (lim, hard)) #limit +# Run appropriate analysis + +if args.ba == True: + batch( + args.filename, + output=args.output, + tmax=args.t, + invert=args.invert, + show=args.show, + debug=args.debug, + brightchannel=args.brightchannel, + loader=args.loader, + channel=args.channel, + tdim=args.tdim, + fluo=args.fluo, + fluoresc=args.f, + batch=args.ba + ) + +elif args.ba == False: + run_analysis_pipeline( + args.filename, + output=args.output, + tmax=args.t, + invert=args.invert, + show=args.show, + debug=args.debug, + brightchannel=args.brightchannel, + loader=args.loader, + channel=args.channel, + tdim=args.tdim, + fluo=args.fluo, + fluoresc=args.f, + batch=args.ba + ) + + + diff --git a/momanalysis/__pycache__/__init__.cpython-36.pyc b/momanalysis/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..2ee9ca6 Binary files /dev/null and b/momanalysis/__pycache__/__init__.cpython-36.pyc differ diff --git a/momanalysis/__pycache__/__main__.cpython-36.pyc b/momanalysis/__pycache__/__main__.cpython-36.pyc new file mode 100644 index 0000000..dd2edf1 Binary files /dev/null and b/momanalysis/__pycache__/__main__.cpython-36.pyc differ diff --git a/momanalysis/__pycache__/bacteria_tracking.cpython-36.pyc b/momanalysis/__pycache__/bacteria_tracking.cpython-36.pyc new file mode 100644 index 0000000..a414cb2 Binary files /dev/null and b/momanalysis/__pycache__/bacteria_tracking.cpython-36.pyc differ diff --git a/momanalysis/__pycache__/detection.cpython-36.pyc b/momanalysis/__pycache__/detection.cpython-36.pyc new file mode 100644 index 0000000..6aaaed7 Binary files /dev/null and b/momanalysis/__pycache__/detection.cpython-36.pyc differ diff --git a/momanalysis/__pycache__/gui.cpython-36.pyc b/momanalysis/__pycache__/gui.cpython-36.pyc new file mode 100644 index 0000000..a68e86f Binary files /dev/null and b/momanalysis/__pycache__/gui.cpython-36.pyc differ diff --git a/momanalysis/__pycache__/io.cpython-36.pyc b/momanalysis/__pycache__/io.cpython-36.pyc new file mode 100644 index 0000000..7d47c3a Binary files /dev/null and b/momanalysis/__pycache__/io.cpython-36.pyc differ diff --git a/momanalysis/__pycache__/main.cpython-36.pyc b/momanalysis/__pycache__/main.cpython-36.pyc new file mode 100644 index 0000000..f0cdd2e Binary files /dev/null and b/momanalysis/__pycache__/main.cpython-36.pyc differ diff --git a/momanalysis/__pycache__/measurements.cpython-36.pyc b/momanalysis/__pycache__/measurements.cpython-36.pyc new file mode 100644 index 0000000..3deeb0e Binary files /dev/null and b/momanalysis/__pycache__/measurements.cpython-36.pyc differ diff --git a/momanalysis/__pycache__/output.cpython-36.pyc b/momanalysis/__pycache__/output.cpython-36.pyc new file mode 100644 index 0000000..2b70b23 Binary files /dev/null and b/momanalysis/__pycache__/output.cpython-36.pyc differ diff --git a/momanalysis/__pycache__/plot.cpython-36.pyc b/momanalysis/__pycache__/plot.cpython-36.pyc new file mode 100644 index 0000000..8267297 Binary files /dev/null and b/momanalysis/__pycache__/plot.cpython-36.pyc differ diff --git a/momanalysis/__pycache__/skimage_future.cpython-36.pyc b/momanalysis/__pycache__/skimage_future.cpython-36.pyc new file mode 100644 index 0000000..f0fdd75 Binary files /dev/null and 
b/momanalysis/__pycache__/skimage_future.cpython-36.pyc differ
diff --git a/momanalysis/__pycache__/tracking.cpython-36.pyc b/momanalysis/__pycache__/tracking.cpython-36.pyc
new file mode 100644
index 0000000..c223f6b
Binary files /dev/null and b/momanalysis/__pycache__/tracking.cpython-36.pyc differ
diff --git a/momanalysis/__pycache__/utility.cpython-36.pyc b/momanalysis/__pycache__/utility.cpython-36.pyc
new file mode 100644
index 0000000..e9249b0
Binary files /dev/null and b/momanalysis/__pycache__/utility.cpython-36.pyc differ
diff --git a/momanalysis/_filters.py b/momanalysis/_filters.py
new file mode 100644
index 0000000..306d53e
--- /dev/null
+++ b/momanalysis/_filters.py
@@ -0,0 +1,72 @@
+import numpy as np
+import scipy.ndimage as ndi
+
+def gaussian_kernel(sigma, window_factor=4):
+    # Method suggested on the scipy cookbook
+    """ Returns a normalized 2D Gauss kernel array for convolutions """
+
+
+    linrange = slice(-int(window_factor*sigma), int(window_factor*sigma)+1)
+    x, y = np.mgrid[linrange,
+                    linrange]
+    g = np.exp(-(0.5*x**2/sigma**2 + 0.5*y**2/sigma**2))
+    return g / g.sum()
+
+    """
+    # Method I used in the 1d gaussian filter
+    sd = float(sigma)
+    # make the length of the filter equal to 4 times the standard
+    # deviations:
+    lw = int(window_factor * sd + 0.5)
+    weights = [0.0] * (2 * lw + 1)
+    weights[lw] = 1.0
+    wsum = 1.0
+
+    sd = sd * sd
+    # calculate the kernel:
+    for ii in range(1, lw + 1):
+        tmp = np.exp(-0.5 * float(ii * ii) / sd)
+        weights[lw + ii] = tmp
+        weights[lw - ii] = tmp
+        wsum += 2.0 * tmp
+    for ii in range(2 * lw + 1):
+        weights[ii] /= wsum
+    """
+
+def _eig2image(Lxx, Lxy, Lyy):
+    """
+    Compute, per pixel, the eigenvalues of the Hessian [Lxx, Lxy; Lxy, Lyy],
+    sorted by absolute value (|Lambda1| <= |Lambda2|), together with an
+    eigenvector direction (Ix, Iy).
+    TODO: Acknowledge here
+    """
+
+    tmp = np.sqrt((Lxx - Lyy)**2 + 4*Lxy**2)
+    v2x = 2*Lxy
+    v2y = Lyy - Lxx + tmp
+
+    # Normalize
+    mag = np.sqrt(v2x**2 + v2y**2)
+    i = (mag != 0)
+    v2x[i] = v2x[i]/mag[i]
+    v2y[i] = v2y[i]/mag[i]
+
+    # The eigenvectors are orthogonal
+    v1x = -v2y
+    v1y = v2x
+
+    # Compute the eigenvalues
+    mu1 = 0.5*(Lxx + Lyy + tmp)
+    mu2 = 0.5*(Lxx + Lyy - tmp)
+
+    # Sort eigenvalues by absolute value: abs(Lambda1) < abs(Lambda2)
+    check = np.abs(mu1) > np.abs(mu2)
+
+    Lambda1 = mu1
+    Lambda1[check] = mu2[check]
+    Lambda2 = mu2
+    Lambda2[check] = mu1[check]
+
+    Ix = v1x
+    Ix[check] = v2x[check]
+    Iy = v1y
+    Iy[check] = v2y[check]
+
+    return Lambda1, Lambda2, Ix, Iy
diff --git a/momanalysis/bacteria_tracking.py b/momanalysis/bacteria_tracking.py
new file mode 100644
index 0000000..830ac25
--- /dev/null
+++ b/momanalysis/bacteria_tracking.py
@@ -0,0 +1,106 @@
+from skimage.measure import regionprops
+import matplotlib.pyplot as plt
+import itertools
+from functools import reduce
+import numpy as np
+import matplotlib.pyplot as plt
+
+prob_div = 0.01 #we need a way of determining these but will hard code for now
+prob_death = 0.5 #we need a way of determining these but will hard code for now
+prob_no_change = 0.95 #may need this to be independent of prob_death/prob_div
+av_bac_length = 18 #average bacteria length - may want to code this in - actual bacteria nearer 18
+
+
+
+def find_changes(in_list, option_list, well, new_well):
+    measurements_in = {}
+    measurements_out = {}
+    change_dict = {}
+    for i, region in enumerate(regionprops(well)):
+        measurements_in[i] = [region.centroid[0], region.area] #find y centroid coord and area of each old bac
+    for j, region2 in enumerate(regionprops(new_well)):
+        measurements_out[j] = [region2.centroid[0], region2.area] #find y centroid coord and area of each new bac
+    for option in option_list: #each option is a potential combination of bacteria lineage/death
+        in_options_dict = {}
+        for 
in_num, in_options in enumerate(in_list): + out_bac_area = [] + out_bac_centr = [] + num_divs = (option.count(in_options))-1 #determine the number of divisions/deaths + for l, op in enumerate(option): + if op == in_options: #if the values match append the new centr/areas + out_bac_area.append(measurements_out[l][1]) + out_bac_centr.append(measurements_out[l][0]) + if sum(out_bac_area) < (measurements_in[in_num][1]): #need to divide by biggest number (so prob < 1) + area_chan = sum(out_bac_area)/(measurements_in[in_num][1]) #find relative change in area compared to original + else: + area_chan = (measurements_in[in_num][1])/sum(out_bac_area) #find relative change in area compared to original + if len(out_bac_centr) is not 0: + centr_chan = abs(((sum(out_bac_centr))/(len(out_bac_centr)))-(measurements_in[in_num][0])) #find the average new centroid + else: + centr_chan = 0 + #assign the values to the correct 'in' label + in_options_dict[in_options] = [num_divs, area_chan, centr_chan] + change_dict[option] = in_options_dict #assign the changes to the respective option + return change_dict + +def find_probs(change_dict): + prob_dict = {} + temp_dict = {} + if len(change_dict) == 0: + most_likely = None + return most_likely + for option, probs in change_dict.items(): + probslist = [] + for p in probs: + divs_deaths = probs[p][0] #find the potential number of deaths/divisions for each bac + relative_area = probs[p][1] #find the relative area change + change_centr = probs[p][2] #find the number of pixels the centroid has moved by + if divs_deaths<0: #if the bacteria has died: + prob_divis = prob_death #probability simply equals that of death + prob_centr = 1 #the change in centroid is irrelevant so set probability as 1 + prob_area = 1 #the change in area is irrelevant so set probability as 1 + if divs_deaths == 0: #if the bacteria hasn't died/or divided + prob_divis = prob_no_change #probability of division simply equals probability of no change + prob_area = relative_area #the area will be equal to the relative area change - may need adjusting + if change_centr == 0: #if there is no change then set prob to 1 (0 will cause div error) + prob_centr = 1 + else: + prob_centr = 1/(abs(change_centr)) #the greater the change the less likely + if divs_deaths > 0: #if bacteria have divided: + if relative_area < divs_deaths: #need to make sure we divide by biggest number to keep prob < 1 + prob_area = relative_area/divs_deaths #normalise relative area to the number of divisions + else: + prob_area = divs_deaths/relative_area #normalise relative area to the number of divisions + prob_divis = prob_div**(divs_deaths*divs_deaths) #each division becomes more likely - need to think about it + #for each division the bacteria centroid is expected to move half the bac length + prob_centr = 1/abs(((divs_deaths*(av_bac_length/2))-(change_centr))) + probslist.append(prob_area*prob_divis*prob_centr) #combine the probabilities for division, area and centroid + temp_dict[p] = prob_area*prob_divis*prob_centr #same as probslist but makes output more readable during development + prob_dict[option] = reduce(lambda x, y: x*y, probslist) #multiply the probabilities across all bacteria + most_likely = max(prob_dict, key=prob_dict.get) + return most_likely + +def label_most_likely(most_likely, new_well, label_dict_string): + out_well = np.zeros(new_well.shape, dtype=new_well.dtype) + if most_likely is None: + return out_well, label_dict_string #if there is no likely option return an empty well + new_label_string = 0 + smax = 
max(label_dict_string, key=int) + for i, region in enumerate(regionprops(new_well)): + if most_likely.count(most_likely[i]) == 1: + out_well[new_well==region.label] = most_likely[i] + else: + smax+=1 + out_well[new_well==region.label] = smax + if i > 0: + last_label_start = label_dict_string[most_likely[i-1]] + else: + last_label_start = label_dict_string[most_likely[i]] + new_label_start = label_dict_string[most_likely[i]] + if new_label_start != last_label_start: + new_label_string = 0 + new_label_string += 1 + add_string = "_%s" % (new_label_string) + label_dict_string[smax] = new_label_start+add_string + return out_well, label_dict_string + diff --git a/momanalysis/detection.py b/momanalysis/detection.py new file mode 100644 index 0000000..59bc627 --- /dev/null +++ b/momanalysis/detection.py @@ -0,0 +1,760 @@ +# +# FILE : detection.py +# CREATED : 27/09/16 13:08:52 +# AUTHOR : J. Metz +# DESCRIPTION : Detection functions +# + +import math +import skimage.filters as skfilt +import momanalysis.skimage_future as skfuture +import skimage.morphology as skmorph +import skimage.measure as skmeas +import numpy as np +import scipy.spatial as scispat +import scipy.ndimage as ndi +from momanalysis.utility import logger +import matplotlib.pyplot as plt +from skimage.morphology import watershed +from skimage.filters import sobel +from skimage.measure import regionprops +import skimage.exposure as exp +from statistics import median +from scipy.interpolate import InterpolatedUnivariateSpline + +def detect_channel(image, + scale_range=[5.0, 10.0], + bright=False, + minwidth=5, + ): + """ + Detect channel which is characterised by a long dark/bright band within the image. + + Inputs + ------ + + image - the image to analyse + scale_range - scale range for channel ridge detection + """ + # Use ridge filter to detect... ridges. 
+ # NOTE: frangi detects DARK ridges by default + if bright: + filt = skfuture.frangi( + image.max()-image, + scale_range=scale_range, + black_ridges=True, + ) + else: + filt = skfuture.frangi( + image, + scale_range=scale_range, + black_ridges=True, + ) + + #ridges = filt > skfilt.threshold_otsu(filt) + ridges = filt > skfuture.threshold_li(filt) + # Clean up to separate any branches + if minwidth: + ridges = skmorph.opening(ridges, selem=skmorph.disk(minwidth)) + + # Pull out the largest region + lbl2,N2 = skmeas.label(ridges, return_num=True) + if N2 == 0: + logger.warning("Unable to detect ridge!") + return np.zeros(lbl2.shape, dtype=bool) + lchannel = np.argmax([ (lbl2==l).sum() for l in range(1,N2+1)])+1 + channel = lbl2==lchannel + + return channel + + +def detect_wells( + image, + scale_range=[2.0,6.0], + maxd=300, + mind=100, + maxperp=22, + minwidth=3, + ): + """ + Detect the wells using a ridge filter, and the classifying + based on their periodic property + """ + # NOTE: frangi detects DARK ridges + filt = skfuture.frangi( + image, + scale_range=scale_range, + ) + + ridges = filt > skfilt.threshold_li(filt) + + # Classify using simple measures + lbl,N = skmeas.label(ridges, return_num=True) + lblgood = np.zeros(lbl.shape, dtype='int16') + + smax = 0 + for region in regionprops(lbl): + #print(region.label, region.area) + if region.area > 2500: + smax+=1 + lblgood[lbl==region.label] = smax + lblgood = lblgood > 0 + lbl_filled = ndi.morphology.binary_fill_holes(lblgood) + wells_middle = lbl_filled - lblgood + wells_middle = ndi.morphology.binary_closing(wells_middle, iterations=5) + + # Classify using simple measures + lbl_wells,N = skmeas.label(wells_middle, return_num=True) + #props = skmeas.regionprops(lbl) + bwnew = np.zeros(lbl_wells.shape, "bool") + coms = [] + ngood = 0 + lbl_final= np.zeros(lbl_wells.shape, dtype='int16') + """ + plt.figure() + plt.imshow(image) + plt.figure() + plt.imshow(filt) + plt.figure() + plt.imshow(ridges) + plt.figure() + plt.imshow(lblgood) + plt.figure() + plt.imshow(wells_middle) + plt.show() + """ + for l in range(1, N+1): + bw = lbl_wells == l + # Size + area = bw.sum() + if area > (maxd*maxd/2) or area < mind: + continue + perim = bw ^ ndi.binary_erosion(bw) + pts = np.array(perim.nonzero(), dtype=float).T + # Length = max separation between any two points in each region + maxdist = scispat.distance.cdist(pts, pts) + dnow = maxdist.max() + imax = (maxdist == dnow).nonzero() + vmax = pts[imax[0][0]] - pts[imax[1][0]] + vmax /= np.sqrt(np.sum(vmax**2)) + vperp = np.array([-1*vmax[1], vmax[0]]) + pperp = [np.dot(vperp, p) for p in pts] + distperp = np.max(pperp) - np.min(pperp) + if (dnow> mind) & (dnow < maxd) & (distperp < maxperp): + #maxperp is causing some "well loss" on detection - values occassionally + #higher than 20 but less than 22 (2 well widths) so will change to 22 + #May be due to knew detection method so need to revise!!! 
+ bwnew[bw] = True + ngood +=1 + lbl_final[bw] = ngood + if ngood < 2: + logger.warning("Less than 2 wells detected, this function has probably failed") + + ## Lastly let's see if we can detect anomolous things + #propsgood = skmeas.regionprops(lblgood) + #good = np.zeros(lblgood.shape) + #bad = np.zeros(lblgood.shape) + #mus = np.array([p.moments_hu for p in propsgood]) + #mus2 = mus - mus.mean(axis=0) + + return lbl_final, ridges # For debugging + + +def extract_well_profiles( + image, + channel, + wells, + wellwidth = 11, + debug=False, + min_well_sep_factor=2.5, + max_well_sep_factor=6, + tracked = False): + """ + * Extend wells down to channel and up to maximal height + * Add in missing wells (using interpolation of positions) + * Extract well profiles + + TODO: Add full docstring with returns etc + """ + + #------------------- + # A bit of pre-processing on the wells + propsgood = skmeas.regionprops(wells) + # Make sure we have at least 2 wells... at least this many for the + # interpolation... + # TODO: Decide if we need more + if len(propsgood) < 2: + logger.error("Less than 2 wells detected; unable to extract well profiles") + blank_wellimage = np.zeros(image.shape, dtype="uint16") + return [], blank_wellimage, [] + + coms, oris, uvec_para, uvec_perp = _get_wells_and_unit_vectors( + propsgood, + debug=debug, + ) + + normseps, posperp_sorted = _get_well_spacing_and_separations( + coms, + uvec_perp, + wellwidth, + debug=debug, + min_well_sep_factor=min_well_sep_factor, + max_well_sep_factor=max_well_sep_factor, + ) + + images, wellimage, coords = interpolate_positions_and_extract_profiles( + normseps, + posperp_sorted, + propsgood, + uvec_para, + uvec_perp, + wellwidth, + image, + debug=debug, + tracked=tracked + ) + + return images, wellimage, coords + + +#------------------------------ +# Subfunctions for extract_well_profiles +#------------------------------ + +def get_channel_orientation_and_line(channel): + """ + Determine channel orientation and get line coordinates + + Returns + ------- + orientation : float + Angle between x-axis and major axis of ellipse that + has the same second-moments as the channel. 
+ Ranges from -pi/2 to pi/2 in counter-clockwise direction + line : (2,2) tuple + ((x1,y1), (x2,y2)) for the line end-points; + uses the major axis length to determine the length + """ + channelprop = skmeas.regionprops(1*channel)[0] + yc0,xc0 = channelprop.centroid + orientation = channelprop.orientation + xc1 = xc0 + math.cos(orientation) * 0.5 * channelprop.major_axis_length + yc1 = yc0 + math.sin(orientation) * 0.5 * channelprop.major_axis_length + xc2 = xc0 - math.cos(orientation) * 0.5 * channelprop.major_axis_length + yc2 = yc0 - math.sin(orientation) * 0.5 * channelprop.major_axis_length + return orientation, ((xc1, yc1), (xc2, yc2)) + + +def _get_wells_and_unit_vectors(props, debug=False): + # In perpendicular direction, get positions, + # and in parallel direction, get min and max coords + coms = [] + oris = [] + lens = [] + + for i,prop in enumerate(props): + # Convert to simple line using + # centroid and orientation information + y0,x0 = prop.centroid + orientation = prop.orientation + # Make sure angle is in range 0, pi + ori2 = orientation % np.pi + + # Find maximal well length (separation of points in binary image) + dist = scispat.distance.cdist(prop.coords, prop.coords) + length = dist.max() + x1 = x0 + math.cos(ori2) * 0.5 * length + y1 = y0 - math.sin(ori2) * 0.5 * length + x2 = x0 - math.cos(ori2) * 0.5 * length + y2 = y0 + math.sin(ori2) * 0.5 * length + coms.append(np.array((x0,y0))) + oris.append(ori2) + lens.append(length) + coms = np.array(coms) + oris = np.array(oris) + # Get the median well orientation + ori = np.median(oris) + if debug: + print("\nWell orientations") + print(oris) + print("median orientation:", ori) + # Generate parallel and perpendicular unit vectors. + uvec_para = np.array([math.cos(ori), -math.sin(ori)]) + uvec_perp = np.array([math.sin(ori), math.cos(ori)]) + if debug: + print("Parallel vector", uvec_para) + print("Perpendicular vector", uvec_perp) + # filter out any positions that shouldn't be here + pospara = [ np.dot(uvec_para, p) for p in coms ] + pospara_med = np.median(pospara) + pospara_dif = np.abs(np.array(pospara)-pospara_med) + pospara_bad = pospara_dif > (0.5*np.median(lens)) + coms = coms[~pospara_bad] + oris = oris[~pospara_bad] + if debug: + print("After filtering bad CoMs") + print("Orientations") + print(oris) + ori = np.median(oris) + # Generate parallel and perpendicular unit vectors. + uvec_para = np.array([math.cos(ori), -math.sin(ori)]) + uvec_perp = np.array([math.sin(ori), math.cos(ori)]) + if debug: + print("Parallel vector", uvec_para) + print("Perpendicular vector", uvec_perp) + return coms, oris, uvec_para, uvec_perp + + +def _get_well_spacing_and_separations( + coms, + uvec_perp, + wellwidth, + min_well_sep_factor=2.5, + max_well_sep_factor=6, + debug=False): + # Let's get the spacing + posperp = [ np.dot(uvec_perp, p) for p in coms ] + if debug: + print("Perpendicular positions") + print(posperp) + + posperp_sorted = np.sort(posperp) + seps = np.diff( posperp_sorted ) + seps2 = np.diff(posperp_sorted) #create a duplicate for determination of median + # Remove any separations that are less than the well-width, + # these are likely due to fragmentation of a well + goodseps = seps > wellwidth + seps = seps[goodseps] + posperp_sorted = posperp_sorted[np.concatenate([[True,], goodseps])] + seps_sorted = np.sort(seps) + + #use only "realistic" separations for determining median + #I.e. 
bigger than one well width but less than 6 + goodseps2 = ( + (seps2 >= (wellwidth*min_well_sep_factor)) + & (seps2 <(wellwidth*max_well_sep_factor))) + seps2 = seps2[goodseps2] + seps2_sorted = np.sort(seps2) + + if debug: + print("\nSaving SEPARATIONS.jpg...") + import matplotlib.pyplot as plt + plt.figure() + plt.hist(seps) + plt.savefig('SEPARATIONS.jpg') + plt.close() + + # Median well separation... + # wellsep0 = np.median(seps) + # It's possible that we have a few gaps; if the most frequent gap + # is 1 or more, then the mean and median will be unusable + # Check the median of the lowest few seps + # and compare with the "wellsep" + + if len(seps2_sorted) > 5: + lowerseps = seps2_sorted + else: + # Too few seps for stats, take lowest and hope for best! + lowerseps = seps2_sorted[0] + # 2017/02/01 13:20:06 (GMT):[JM] Surely this should be median... + #lowersep = np.mean(lowerseps) + lowersep = np.median(lowerseps) #determine median from "realistic values" + + normseps = seps/lowersep #normalise separations to the median + normseps_temp = normseps.round() #round to the nearest whole number + normseps_temp = abs(normseps_temp - normseps) #subtract actual normalised values + wrong_seps = [] + for i, j in enumerate(normseps_temp): + if j > 0.1: #"correct" values will be close to whole numbers when divided by median + wrong_seps.append(i) #append the index value of "incorrect" values + posperp_sorted = np.delete(posperp_sorted, wrong_seps) #delete these "incorrect" values + seps3 = np.diff( posperp_sorted ) + normseps = seps3/lowersep + normseps_sorted = np.sort(normseps) + + if len(seps_sorted) > 5: + #2017/02/01 13:21:26 (GMT)[JM] As above, should be median here?? + #lowernormed = np.mean(normseps_sorted[:5]) + lowernormed = np.median(normseps_sorted[:5]) + else: + lowernormed = normseps_sorted[0] + if debug: + print("normseps_sorted") + print(normseps_sorted) + print("LOWERNORMED") + print(lowernormed) + + if (0.95 < lowernormed) and (1.05 > lowernormed): + # We're pretty sure we've got a good estimator + # here + pass + else: + raise Exception("Wells not extracted properly - mean normalized separation\n" + + "not close to 1: %f [lowersep = %f, lowerseps = %s]"%(lowernormed, lowersep, str(lowerseps))) + + if debug: + print("NORMSEPS") + print(normseps) + + return normseps, posperp_sorted + + +def interpolate_positions_and_extract_profiles( + normseps, + posperp_sorted, + props, + uvec_para, + uvec_perp, + wellwidth, + image, + debug=False, + tracked=False): + + wellimage = np.zeros(image.shape, dtype="uint16") + numseps = normseps.round() + # corrected = seps/numseps + # wellsep = np.mean(corrected) + #corrected_normed = corrected / wellsep + + if tracked is False: + #find the median well separations + seps = np.diff( posperp_sorted ) + #the separations have already been filtered so we can take the min here + #separations can still be >1 so mean and median may not work!!! + min_sep = np.min(seps) + + #find how many extra wells we can fit in at either end + low_extra = int((posperp_sorted[0]/min_sep)) + high_bound = image.shape[1]-(posperp_sorted[len(posperp_sorted)-1]) + high_extra = int((high_bound/min_sep)) + + # Now whereever we have a sep of > 1, we need to + # interpolate in additional wells! 
+ + #if we use the above method simply replace intpos with posp_numbered + intpos = [0,] + list(np.cumsum(numseps)) + spline = InterpolatedUnivariateSpline(intpos, posperp_sorted, k=1) + int_interp = np.arange(0-low_extra, np.max(intpos)+1+high_extra) + + posperp_interp = spline(int_interp) + else: + # corrected = seps/numseps + # wellsep = np.mean(corrected) + #corrected_normed = corrected / wellsep + + # Now whereever we have a sep of > 1, we need to + # interpolate in additional wells! + + #if we use the above method simply replace intpos with posp_numbered + intpos = [0,] + list(np.cumsum(numseps)) + posperp_interp = np.interp( + np.arange(0, np.max(intpos)+1), + intpos, + posperp_sorted, + ) + + if debug: + print("INTERPOLATED WELL POSITIONS ALONG AXIS") + print(posperp_interp) + + # Let's look at the max and min extents along parallel + # direction + maxparallel = [] + minparallel = [] + for i,prop in enumerate(props): + posp = np.array([ np.dot( uvec_para, p[[1,0]]) for p in prop.coords]) + maxparallel.append(posp.max()) + minparallel.append(posp.min()) + + # Use "average" end positions? + #medmax = np.median(maxparallel) + #medmin = np.median(minparallel) + # Use extremal end positions + medmax = np.max(maxparallel) + medmin = np.min(minparallel) + p00 = medmin * uvec_para + p01 = medmax * uvec_para + p10 = p00 + 1000*uvec_perp + p11 = p01 + 1000*uvec_perp + + if debug: + print("Median maximal parallel position", medmax) + print("Median minimal parallel posotion", medmin) + print("Basline") + print(p00, p01) + print(p10, p11) + # Extract profiles + #profiles = [] + #for pperp in posperp_interp: + # + # p0 = p00 + pperp * uvec_perp + # p1 = p01 + pperp * uvec_perp + # + # # Now lets get a good line-scan for each well + # profiles.append(skmeas.profile_line( + # image, + # p0[[1,0]], + # p1[[1,0]], + # linewidth=wellwidth, + # )) + + # Lets see the images instead of the profiles + images = {} + #images = [] + coords = [] + #we need to take away "low_extra" (the number of new wells we can extrapolate) + #from the minimum label - so it assigns them properly when tracked! + for numnow, pperp in enumerate(posperp_interp, min([p.label for p in props])): + p0 = p00 + pperp * uvec_perp + p1 = p01 + pperp * uvec_perp + # TODO: Propose removal : Is this useful for debug? + #if debug: + # print("Well %d"%numnow) + # print("Line coordinates:", p0, p1) + perp_lines = skmeas.profile._line_profile_coordinates( + p0[[1,0]], p1[[1,0]], + linewidth=wellwidth) + # TODO: Propose removal : Is this useful for debug? 
+ #if debug: + # print("Coordinates from skmeas") + # print(perp_lines) + # print("as int") + # print(perp_lines.round().astype(int)) + pixels = ndi.map_coordinates(image, perp_lines, + order=1, mode='constant', cval=0.0) + #images.append(pixels) + images[numnow]=pixels #creates a dictionary of wells with each well linked to a number + + perp_line_coords = perp_lines.round().astype(int).squeeze() + for ndim in range(wellimage.ndim): + perp_line_coords[ndim][perp_line_coords[ndim] < 0] = 0 + perp_line_coords[ndim][perp_line_coords[ndim] + >= wellimage.shape[ndim]] = wellimage.shape[ndim]-1 + perp_line_coords = tuple(perp_line_coords.tolist()) + wellimage[perp_line_coords] = numnow + coords.append(perp_line_coords) + return images, wellimage, coords + +#------------------------------ +# End of extract_well_profiles subfunctions +#------------------------------ + + +def remove_background_max(profiles): + """ + Assumes background well image is maximum value + across each well image + """ + bglevel = np.max(list(profiles.values()), axis=0) + newprofiles = {} + for k2, image2 in profiles.items(): + p = bglevel-image2 + p2 = p*(p>0) + newprofiles[k2] = p2 + return newprofiles + + + +def remove_background(profiles, radius=20, light_background=True): + """ + Uses port of ImageJ rolling ball background subtraction + to estimate background + """ + # Make "spherical" structuring element + sz = 2*radius + (radius+1)%2 + X,Y = np.meshgrid(range(sz), range(sz)) + ballheight = float(radius**2) - (X-radius)**2 - (Y-radius)**2 + ballheight[ballheight<0] = 0 + ballheight = np.ma.masked_where(ballheight < 0, ballheight) + ballheight = np.sqrt(ballheight) + newprofiles = {} + for k, im in profiles.items(): + # Run background subtraction + if light_background: + imax = im.max() + im2 = imax-im + bg = ndi.grey_opening(im2, structure=ballheight, mode="reflect") + im2 -= bg + newprofiles[k] = im2-imax + else: + bg = ndi.grey_opening(im, structure=ballheight, mode="reflect") + newprofiles[k] = bg-im + return newprofiles + +def detect_bacteria_in_wells( + wellimages, + timepoint = None, + label_dict_string = None, #dictionary of string labels + maxsize = 1500, # maximum area (in pixels) of an object to be considered a bacteria + minsize = 20, # maximum area (in pixels) of an object to be considered a bacteria + absolwidth = 1, #width (in pixels) at which something is definitely a bacteria + distfrombottom = 30, #ignores anything labeled this distance from the bottom of the well (prevents channel border being labelled) + topborder = 3, # Distance to exclude from top due to "shadow" + toprow = 12,#number of pixels along the top row for a label to be discarded + thresh_perc = 0.75, #percentage of threshold to use 1=100% - long term just change filter + ): + + segs = {} + segs2 = [] + segs3 = {} + + #sigma_list = np.arange(0.5, 3.5, 0.1) + sigma_list = np.arange(2.0, 6.0) + for n, im in wellimages.items(): + #find the well shape so we can get the coordinates of the top of the well + a = im.shape + ylen = a[0] + #the top few pixels of the well is often just shadow and is sometimes mistakenly + #labelled as bacteria - so will use this value later to filter it out + toppixofwell = ylen - topborder + # Basic filtering + + #using scale space + gl_images = [-ndi.gaussian_laplace(im, s, mode="nearest") * s ** 2 + for s in sigma_list] + newwell =np.max(gl_images, axis=0) + + segs[n]= newwell + segs2.append(newwell) + #can we "normalise" the numbers after the filter so it's between a set value? 
+ #or just change filter + thresh = (skfilt.threshold_otsu(np.concatenate([s.flatten() for s in segs2]))*thresh_perc) + smax = 0 + for i,f in segs.items(): + bw = f > thresh + bw1 = ndi.morphology.binary_erosion(bw) + bw2 = ndi.morphology.binary_dilation(bw) + bw3 = bw2^bw1 + + #perform distance transform on filtered image + dist = ndi.distance_transform_edt(bw1) + + #create markers for watershed + markers = np.zeros_like(f) + + + markers[dist >= absolwidth] = 2 + markers[dist < absolwidth] = 0 + markers = ndi.label(markers)[0] + markers = markers+1 + markers[bw3]=0 + + #Perform watershed + segmentation = watershed(f, markers) + segmentation = ndi.binary_fill_holes(segmentation - 1) + + #label image + labeled_bacteria, Nbac = ndi.label(segmentation) + newbac = np.zeros(labeled_bacteria.shape, dtype=labeled_bacteria.dtype) + + for labnow, region in enumerate(regionprops(labeled_bacteria),start=1): + # TODO: REMOVE - POINTLESS + #newbac2 = np.zeros(labeled_bacteria.shape, dtype=labeled_bacteria.dtype) + if(maxsize > region.area > minsize): + ycoords = [] + for coords in region.coords: + y = coords[0] + ycoords.append(y) + toppix = 0 + for coords in ycoords: + if coords >= toppixofwell: + toppix += 1 + if ((all(i <= distfrombottom for i in ycoords)) == False): #may need to change to any - move to split!!! + if toppix <= toprow: + #if they have passed all of these then we can label them in "newbac" + smax += 1 + newbac[labeled_bacteria==labnow] = smax + + segs3[i] = newbac + + return segs3 + +def split_bacteria( + segs, + label_dict_string, + timepoint=None, + relative_width = 5, #how narrow bacteria must become to be split e.g. 4 = 1/4 of the median width + min_area = 20, #the minimum area of a new "split" bacteria in pixels + change_in_centroid = 5, #minimum change in centroid - unsplit bacteria will be similar/same + area_change = 20, #percentage change in area - unsplit bacteria shouldn't change much + distfrombottom = 20, #ignores anything labeled this distance from the bottom of the well (prevents channel border being labelled) + absolwidth = 2, #average width of a bacteria (determined along the skeleton) + ): + + """NOTE!!! We may want to make some sort of loop for multiple splits. 
But at the moment it uses the whole skeleton + and directly references coordinates so should be okay""" + split_bac = {} + if timepoint==0: + label_dict_string = {} + smax=0 + else: + smax = max(label_dict_string, key=int) + for num, well in segs.items(): + tempwell = np.copy(well) + widths = [] + max_label = np.max(tempwell) + max_label2 = np.max(tempwell) + comparison_well =np.zeros(well.shape, dtype=well.dtype) + if len(regionprops(tempwell)) == 0: + comparison_well = np.copy(well) + else: + for i, region in enumerate(regionprops(tempwell)): + widths.append(region.minor_axis_length) + w = median(widths) #find median width + """ + + maxy = max(region.coords[0:,0]) #find max and min y coordinates in region + miny = min(region.coords[0:,0]) + for c in region.coords: + if (maxy-w)<=c[0]<=(maxy) or (miny+w>=c[0]>=miny): + #if the y coordinate falls within the width from the top/bottom set the value to 0 in tempwell + tempwell[c[0]][c[1]] = 0 + """ + skel,dist_temp = skmorph.medial_axis(tempwell, return_distance=True) + skel = ndi.morphology.binary_dilation(skel) + skel = dist_temp*skel #distance transform of skeleton + markers = np.zeros_like(skel) + skel[skel==0] = 100 #set the background to a high number so it doesn't get included in the next step + markers[skel<(w*(1/relative_width))] = 1 #1/4 of width + markers, N = ndi.label(markers) + for j, r in enumerate(regionprops(markers)): + for coords in r.coords: + tempwell[coords[0]][coords[1]] = 0 #set any of these points to 0 in the temporary image (copy of original) + new_tempwell, N = ndi.label(tempwell) #label this image + for k, r_new in enumerate(regionprops(new_tempwell)): + if r_new.area > min_area: #remove small areas + max_label += 1 #give it a temporary "new label" - unique + comparison_well[new_tempwell==r_new.label] = max_label + if len(regionprops(well)) == len(regionprops(comparison_well)): + comparison_well = np.copy(well) #if the number of regions hasn't changed we will simply use the original image + elif len(regionprops(well)) < len(regionprops(comparison_well)):#if we have "split" some bacteria we need to do a bit more + for r_old in regionprops(well): + for lbl in regionprops(comparison_well): + if (abs(r_old.centroid[0]-lbl.centroid[0])0].mean() + if av_width > absolwidth: + smax +=1 + newwell[comparison_well==r2.label] = smax + if timepoint == 0: + label_dict_string[smax] = str(smax) + split_bac[num]= newwell + + return split_bac, label_dict_string diff --git a/momanalysis/gui.py b/momanalysis/gui.py new file mode 100644 index 0000000..2ac6b22 --- /dev/null +++ b/momanalysis/gui.py @@ -0,0 +1,414 @@ +import sys +from PyQt5.QtWidgets import ( + QMainWindow, + QApplication, + QPushButton, + QWidget, + QFileDialog, + qApp, + QMessageBox, + QGridLayout, + QLabel, + QLineEdit, + QCheckBox, + QRadioButton, + QPlainTextEdit, + QComboBox, + QListWidget, +) +from PyQt5.QtGui import QFont +from PyQt5.QtCore import Qt, QThread, pyqtSignal, QObject +import logging + +import os +sys.path.append(os.path.join(os.path.dirname(__file__), '..')) +import momanalysis.main as mom +from momanalysis.utility import logger +import subprocess +import matplotlib.pyplot as plt +plt.switch_backend('qt5agg') + +class MainWindow(QMainWindow): + + def __init__(self): + super().__init__() + + self.initUI() + self.files = [] + self.fluorescence_stack = None + self.integrated_fluo = False + self.output_file = None + self.currently_selected_file = None + self.batch = False + + def initUI(self): + + self.statusBar().showMessage("No File Added") + self.mw = 
MainWidget(self) + self.setCentralWidget(self.mw) + self.setGeometry(300, 300, 800, 300) + self.show() + + + def findfile(self): + options = QFileDialog.Options() + options |= QFileDialog.DontUseNativeDialog + fileName, _ = QFileDialog.getOpenFileName(self, + "Select input file", "", + "All Files (*);;Python Files (*.py)", + options=options) + if fileName: + self.statusBar().showMessage("File added: " + fileName) + #self.files.append(fileName) + #self.files = fileName if isinstance(fileName, list) else [fileName,] + self.mw.startbutton.setEnabled(True) + self.mw.outputlabel.setEnabled(True) + self.mw.brightfield_box.setChecked(True) + self.mw.removeselectedfiles.setEnabled(True) + self.currently_selected_file = fileName + self.update_files_added() + + def exitmomanalysis(self): + reply = QMessageBox.question(self, 'Message', + "Are you sure to quit?", QMessageBox.Yes | + QMessageBox.No, QMessageBox.No) + + if reply == QMessageBox.Yes: + qApp.quit() + + def fluoresc_stack(self, state): + self.fluorescence_stack = 1 if state else None + + def integr_fluo(self, state): + self.integrated_fluo = state + + #def brightfield_or_fluorescence(self, state): + # pass + + def outputcheckbox(self, state): + if state == Qt.Checked: + self.mw.output.setEnabled(True) + else: + self.mw.output.setEnabled(False) + self.mw.output.setText("Optional: Specify output filename. N.B. it will be followed by a timestamp") + + def remove_files(self): + + self.statusBar().showMessage("File removed. Please add a new file") + for item in self.mw.selectedfiles.selectedItems(): + self.mw.selectedfiles.takeItem(self.mw.selectedfiles.row(item)) + filetoberemoved = item.text() + self.files.remove(filetoberemoved) + if len(self.files) < 1: + self.mw.removeselectedfiles.setEnabled(False) + self.statusBar().showMessage("Add a file before starting analysis") + self.mw.startbutton.setEnabled(False) + self.mw.comb_fluorescence.setEnabled(False) + self.mw.comb_fluorescence.setChecked(False) + self.mw.seper_fluorescence.setEnabled(False) + self.mw.seper_fluorescence.setChecked(False) + self.mw.brightfield_box.setChecked(True) + + + def manually_entered_file(self): + self.currently_selected_file = self.mw.filesadded.text() + self.update_files_added() + self.mw.filesadded.setText(self.mw.added_files_text) + self.mw.removeselectedfiles.setEnabled(True) + self.mw.startbutton.setEnabled(True) + + def update_files_added(self): + if self.currently_selected_file is not None: + self.files.append(self.currently_selected_file) + self.mw.selectedfiles.clear() + for file in self.files: + self.mw.selectedfiles.addItem(str(file)) + self.currently_selected_file = None + + def batch_or_not(self,state): + self.batch = state == Qt.Checked + + +class MainWidget(QWidget): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.dir_name = [] + self.info_level = "INFO" + self.setAcceptDrops(True) + + self.infolevel_dict = logging._levelToName + self.initUI() + + def initUI(self): + self.added_files_text = "write the full file path, manually choose or drag and drop the files" + + self.addfilesbutton = QPushButton("Choose Files") + self.addfilesbutton.clicked.connect(self.parent().findfile) + + self.selectedfiles = QListWidget(self) + #self.selectedfiles.setReadOnly(True) + + self.exitbutton = QPushButton("Exit") + self.exitbutton.clicked.connect(self.parent().exitmomanalysis) + + self.startbutton = QPushButton("Start Analysis") + self.startbutton.clicked.connect(self.start_analysis) + self.startbutton.setEnabled(False) + + 
self.seeresults = QPushButton("Open results folder") + self.seeresults.setEnabled(False) + self.seeresults.clicked.connect(lambda: launch_file_explorer(self.dir_name)) + + self.output = QLineEdit("Optional: Specify output filename. N.B. it will be followed by a timestamp") + self.output.setEnabled(False) + self.outputlabel = QCheckBox("Use own name for output File:") + self.outputlabel.setEnabled(False) + self.outputlabel.stateChanged.connect(self.parent().outputcheckbox) + + self.addfiles = QLabel("Input file:") + self.welcome = QLabel("Welcome to Momanalysis") + self.filesadded = QLineEdit(self.added_files_text) + self.filesadded.returnPressed.connect(self.parent().manually_entered_file) + + self.removeselectedfiles = QPushButton("Remove selected files") + self.removeselectedfiles.setEnabled(False) + self.removeselectedfiles.clicked.connect(self.parent().remove_files) + + self.analysisoptions = QLabel("Select options") + + ### + ### Start file-mode options (i.e. Brightfield only, combined, seperate) + ### + self.brightfield_box = QRadioButton("Brightfield Only") + self.brightfield_box.setToolTip("Default.
Images are only brightfield and no fluorescence analysis is required.") + self.brightfield_box.setChecked(True) + #self.brightfield_box.toggled.connect(self.parent().brightfield_or_fluorescence) + + self.comb_fluorescence = QRadioButton("Combined Fluorescence") + self.comb_fluorescence.setToolTip("Select if your stack contains alternating brightfield and fluorescent images") + self.comb_fluorescence.toggled.connect(self.parent().integr_fluo) + #self.comb_fluorescence.setEnabled(False) + + self.seper_fluorescence = QRadioButton("Seperate Fluorescence") + self.seper_fluorescence.setToolTip("Select if you have a stack of brightfield images and a separate stack of matching fluorescent images") + self.seper_fluorescence.toggled.connect(self.parent().fluoresc_stack) + #self.seper_fluorescence.setEnabled(False) + ### + ### End of file-mode options + ### + + self.set_debug = QLabel("Select info level") + self.set_debug.setToolTip("Select the level of logging information to display:
Not Set = all information
Critical = only major errors.

Default is set to Info.") + self.debug_info_level = QComboBox(self) + self.debug_info_level.addItems(self.infolevel_dict.values()) + self.debug_info_level.activated[str].connect(self.set_info_level) + + self.batchcheckbox = QCheckBox("Batch run") + self.batchcheckbox.setEnabled(True) + self.batchcheckbox.setChecked(False) + self.batchcheckbox.stateChanged.connect(self.parent().batch_or_not) + + self.logwidget = QPlainTextEdit(self) + self.logwidget.setReadOnly(True) + + + font = QFont() + font.setBold(True) + font.setPointSize(8) + self.welcome.setFont(font) + + #edit layout + + grid = QGridLayout() + grid.setSpacing(10) + + grid.addWidget(self.welcome,0,0) + grid.addWidget(self.addfilesbutton,1,5) + grid.addWidget(self.selectedfiles,2,1) + grid.addWidget(self.removeselectedfiles,2,5) + grid.addWidget(self.filesadded,1,1) + grid.addWidget(self.addfiles,1,0) + grid.addWidget(self.exitbutton,14,6) + grid.addWidget(self.outputlabel,9,0) + grid.addWidget(self.output,9,1) + grid.addWidget(self.analysisoptions,3,0) + grid.addWidget(self.brightfield_box,4,0) + grid.addWidget(self.comb_fluorescence,5,0) + grid.addWidget(self.seper_fluorescence,6,0) + grid.addWidget(self.startbutton, 10, 1) + grid.addWidget(self.seeresults, 11, 1) + grid.addWidget(self.set_debug,12,0) + grid.addWidget(self.debug_info_level,12,1) + grid.addWidget(self.logwidget,13,1) + grid.addWidget(self.batchcheckbox,7,0) + + + #set layout + self.setLayout(grid) + + self.thread = AnalysisThread(self.parent(), self) + self.thread.finished_analysis.connect(self.updateUi) + self.thread.log_message.connect(self.addLogMessage) + + def set_info_level(self, str): + self.info_level = str #self.infolevel_dict[str] + return self.info_level + #print(self.infolevel_dict[str], flush = True) + + + def start_analysis(self): + if self.thread.isRunning(): + self.parent().output_file = None + #self.setEnabled(True) + for child in self.children(): + if hasattr(child, "setEnabled"): + child.setEnabled(True) + self.thread.terminate() + self.startbutton.setText('Start Analysis') + logger.critical("Run aborted by user") + self.parent().statusBar().showMessage("Run aborted by user. Please add a new file and start again") + self.output.setText("Optional: Specify output filename. N.B. 
it will be followed by a timestamp") + self.filesadded.setText(self.added_files_text) + self.parent().files = [] + self.parent().update_files_added() + #self.removeselectedfiles.setEnabled(False) + #self.setAcceptDrops(True) + else: + self.startbutton.setText('Stop Analysis') + + #self.setEnabled(False) + for child in self.children(): + if hasattr(child, "setEnabled"): + child.setEnabled(False) + self.startbutton.setEnabled(True) + + self.parent().statusBar().showMessage("Running analysis") + if self.outputlabel.isChecked(): + self.parent().output_file = str(self.output.text()) + else: + self.parent().output_file = None + #self.addfilesbutton.setEnabled(False) + #self.filesadded.setEnabled(False) + #self.comb_fluorescence.setEnabled(False) + #self.seper_fluorescence.setEnabled(False) + #self.seeresults.setEnabled(False) + #self.outputlabel.setEnabled(False) + #self.setAcceptDrops(False) + self.thread.start() + + def addLogMessage(self, msg): + self.logwidget.appendPlainText(msg) + + def updateUi(self, dir_name): + #self.setEnabled(True) + for child in self.children(): + if hasattr(child, "setEnabled"): + child.setEnabled(True) + self.startbutton.setText('Start Analysis') + self.seeresults.setEnabled(True) + self.addfilesbutton.setEnabled(True) + self.parent().statusBar().showMessage("Analysis Finished. Click to see your files or please add a new file") + self.dir_name = dir_name + self.filesadded.setEnabled(True) + self.filesadded.setText("Finished. Type new file path, manually select or drag file") + + + def dragEnterEvent(self, e): + if e.mimeData().hasUrls(): + e.accept() + else: + e.ignore() + + def dropEvent(self, e): + for url in e.mimeData().urls(): + if os.path.isfile(url.toLocalFile()) or os.path.isdir(url.toLocalFile()): + path = url.toLocalFile() + #self.parent().files = path if isinstance(path, list) else [path,] + #self.filesadded.setText(path) + self.parent().currently_selected_file = path + self.startbutton.setEnabled(True) + self.outputlabel.setEnabled(True) + self.parent().statusBar().showMessage("File added: " + path) + self.removeselectedfiles.setEnabled(True) + self.brightfield_box.setChecked(True) + self.parent().update_files_added() + + + +class AnalysisThread(QThread): + + finished_analysis = pyqtSignal(str) + log_message = pyqtSignal(str) + + def __init__(self, parent=None, parent2 = None): + QThread.__init__(self) + self.parent = parent + self.mainWidget = parent2 + + + def run(self): + output = self.parent.output_file + fluo = self.parent.fluorescence_stack + fluoresc = self.parent.integrated_fluo + inputfile = self.parent.files + self.loghandler = QLogHandler(self) + """This changes the format of the GUI messages""" + #self.loghandler.setFormatter(logger.formatter) + logger.addHandler(self.loghandler) + #using the numeric values (20 is default = Info) + logger.setLevel(self.mainWidget.info_level) + + if self.parent.batch == True: + dir_name = mom.batch( + inputfile, + output = output, + fluo =fluo, + fluoresc= fluoresc, + batch=True) + else: + dir_name = mom.run_analysis_pipeline( + inputfile, + output = output, + fluo =fluo, + fluoresc= fluoresc, + ) + self.finished_analysis.emit(dir_name) + + +def launch_file_explorer(path): + # Try cross-platform file-explorer opening... 
+ # Courtesy of: http://stackoverflow.com/a/1795849/537098 + if sys.platform=='win32': + subprocess.Popen(['start', path], shell= True) + elif sys.platform=='darwin': + subprocess.Popen(['open', path]) + else: + try: + subprocess.Popen(['xdg-open', path]) + except OSError: + # er, think of something else to try + # xdg-open *should* be supported by recent Gnome, KDE, Xfce + QMessageBox.critical(self, + "Oops", + "\n".join(["Couldn't launch the file explorer, sorry!" + "Manually open %s in your favourite file manager"%path]) + ) + +class QLogHandler(logging.Handler): + def __init__(self, parent): + super().__init__() + self.parent=parent + + def emit(self, record): + msg = self.format(record) + self.parent.log_message.emit(msg) + + +if __name__ == '__main__': + app = QApplication(sys.argv) + ex = MainWindow() + ex.setWindowTitle("Momanalysis") + ex.show() + app.exec_() diff --git a/momanalysis/io.py b/momanalysis/io.py new file mode 100644 index 0000000..7b87180 --- /dev/null +++ b/momanalysis/io.py @@ -0,0 +1,224 @@ +"""Input/Output functions + +""" +import numpy as np +import scipy.ndimage as ndi +import skimage.io as skio +from skimage.external import tifffile +import os +from collections import defaultdict +import shutil + +##========================================================= +## Data IO (wrappers around skimage functions) +##========================================================= + +def input_folder(inputfiles): + """determines if the input is a file or a folder. + If it's a folder it returns a list of the tif files inside + """ + original_dir = os.getcwd() + image_list = [] + for f in inputfiles: + f = os.path.realpath(f) + if os.path.isfile(f)==True: + return inputfiles + if os.path.isdir(f)==True: + os.chdir(f) + for file in os.listdir(f): + if file.endswith(".tif"): + image = os.path.realpath(file) + image_list.append(image) + os.chdir(original_dir) + return image_list + +def folder_all_areas(mainfolder): + """takes our recommended input of a folder with all time/area stamped images e.g. 
'area_date_time_info.tif', + subfolders them by area number""" + original_dir = os.getcwd() + area = defaultdict(list) + arealist = [] + mainfolder = mainfolder[0] + os.chdir(mainfolder) + for file in os.listdir(mainfolder): + if file.endswith(".tif"): + basename = os.path.splitext(file)[0] + area_num = basename.split('_')[0] + basename_split = basename.split('_') + #this (above) may need to change slightly if our labelling system changes + if not basename == 'Thumbs': #windows produces a Thumbs.db file so this stops this interfering + if not 'PI' in basename_split: #if it is a PI image don't include it in analysis + area[area_num].append(file) + return area + + +def load_data(filenames): + """Load a single file or sequence of files using skimage.io""" + filenames = [filenames,] if isinstance(filenames, str) else filenames + loadfunc = tifffile.imread if all(f.lower().endswith("tif") + for f in filenames) else skio.imread + if len(filenames) > 1: + return np.array([ loadfunc(f) for f in filenames ], dtype=float) + elif len(filenames) == 1: + return loadfunc(filenames[0]).astype(float) + else: + raise Exception("load_data received an empty list") + +##========================================================= +## Sample data +##========================================================= + +def _generate_well( + width=11, + height=200, + num_bacteria_lambda=1, + signal_to_noise=5, + length_mean=20, + length_std=10, + bg_offset = 50, + ): + """ + Generate a single well + """ + # Background; choose 10 so should be no -ve pixels + bg = bg_offset + np.random.randn(width, height) + + # How many bacteria? + N_bac = np.random.poisson(num_bacteria_lambda, 1) + lbl = np.zeros((width, height), dtype="uint16") + + if N_bac == 0: + return bg, lbl + + # Distribute randomly along height, making sure there are no "collisions" + signal = np.zeros((width, height), dtype=bool) + n = 0 + while n < N_bac: # for n in range(N_bac): + length = max(( length_mean + length_std * np.random.randn(), 5)) + pos = width/2 + length/2 + (height-width-length)*np.random.rand() + # Create skeleton + x = int(width/2) + y = np.arange(pos-length/2, pos+length/2+1, dtype=int) + + if np.any(signal[x, y]): + continue + + signal[x, y] = 1 + # Use distance transform to dilate the skeletons + lbl[ ndi.distance_transform_edt(~signal) < ((width/2)-2)] = n+1 + n += 1 + signal = 1.0*(lbl > 0) + + # Blur edges + signal = ndi.gaussian_filter(signal, 2.0) + signal -= signal.min() + signal /= signal.max() + + return bg - signal_to_noise * signal, lbl + + +def _generate_well_with_border( + border_multiplier = 1.2, # Multiple of SNR that border is darker + border_width=2, + **well_kwargs + ): + well_width = well_kwargs['width'] + well_height = well_kwargs['height'] + width = 2*border_width + well_width + height = 2*border_width + well_height + bg = well_kwargs.get("bg_offset", 50) - border_multiplier * well_kwargs.get("signal_to_noise", 5) + signal = bg + np.random.randn(width, height) + well_im, well_lbl = _generate_well(**well_kwargs) + signal[border_width:border_width + well_width, border_width:border_width+well_height] = well_im + lbl = np.zeros((width, height)) + lbl[border_width:border_width+well_width, border_width:border_width+well_height] = well_lbl + return signal, lbl + + +def load_sample_well_data( + num_wells = 20, + seed = None, + ): + """ + Generates simple sample well data, i.e. 
columns which sometimes contain a bacteria or so + + Returns: + row of wells (as an image) + row of labelled data ( as label image ) + """ + np.random.seed(seed) + wells = [] + labels = [] + for n in range(num_wells): + well, lbl = _generate_well() + wells.append(well) + labels.append(lbl) + return np.vstack(wells).T, np.vstack(labels).T + + +def load_sample_full_frame( + size=(600, 1000), + well_vertical_offset=100, + well_seperation=50, + well_width = 11, + border_width = 3, + well_height = 200, + bg_offset = 50, + signal_to_noise = 3, + channel_dark_width = 8, + channel_light_width = 8, + channel_dark_line = -5, + channel_light_line = 5, + seed = None, + **well_kwargs + ): + np.random.seed(seed) + + # Create a bigger image so that rotation doesn't cause any edge effects; + sizebig = [ int(1.4*max(size)) for d in range(2)] + + # Generate the bg image + image = bg_offset + np.random.randn(*sizebig) + lbl_wells = np.zeros(sizebig, dtype='uint16') + lbl_bacteria = np.zeros(sizebig, dtype='uint16') + + bords = [ (sbig - s)//2 for sbig, s in zip(sizebig, size)] + # Add in the wells + for lab, x in enumerate(range(10, sizebig[0]-well_width, well_seperation + well_width), start=1): + well_im, well_lbl = _generate_well_with_border( + border_width=border_width, + width=well_width, + height=well_height, + bg_offset=bg_offset+signal_to_noise, + signal_to_noise =signal_to_noise, + ) + image[well_vertical_offset+bords[0]:well_vertical_offset+well_height+2*border_width + bords[0], + x:x+well_width+2*border_width] = well_im.T + lbl_wells[well_vertical_offset+bords[0]:well_vertical_offset+well_height+2*border_width + bords[0], + x:x+well_width+2*border_width] = lab + # Add in the channel; a dark line then bright + + image[well_vertical_offset+bords[0]:well_vertical_offset+channel_dark_width+bords[0]] += channel_dark_line + image[well_vertical_offset+bords[0]:well_vertical_offset-channel_light_width+bords[0]:-1] += channel_light_line + + # Invert Y + image = image[::-1, :] + lbl_wells = lbl_wells[::-1, :] + + # Now pull out the properly sized image! 
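+    # (The frame above was generated on a canvas ~1.4x larger than `size` so a later
+    # rotation would not introduce edge effects; `bords` holds the per-axis margins,
+    # so slicing `b:-b` on each axis recovers the requested output shape.
+    # Note: recent NumPy releases expect a tuple of slices here, i.e.
+    # `image[tuple(slices)]`; indexing with a plain list of slices is deprecated
+    # and may raise an error.)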
+ slices = [ slice(b, -b) for b in bords] + image = image[slices] + lbl_wells = lbl_wells[slices] + lbl_bacteria = lbl_bacteria[slices] + + image -= image.min() + image /= image.max() + + return image, lbl_wells, lbl_bacteria + +def split_fluorescence(data): + newdata = data[::2] + fluodata = data[1::2] + + return newdata, fluodata + diff --git a/momanalysis/main.py b/momanalysis/main.py new file mode 100644 index 0000000..980afea --- /dev/null +++ b/momanalysis/main.py @@ -0,0 +1,279 @@ + +from momanalysis.utility import logger +import momanalysis.io as mio +import momanalysis.plot as mplot +import momanalysis.detection as mdet +import momanalysis.tracking as mtrack +import momanalysis.measurements as mmeas +import momanalysis.output as outp +import os +import traceback + +import matplotlib.pyplot as plt +import numpy as np + +def batch(filenames, *args, **kwargs): + in_folder = [] + in_folder.append(os.path.abspath(filenames[0])) + """Used to run multiple image areas one after another for batch processing""" + filedict = mio.folder_all_areas(in_folder) + for area, files in sorted(filedict.items()): + files = sorted(files) + #feed list into analysis pipeline as well as the area number (dict.key) for output folders + try: + run_analysis_pipeline(files, area_num=area, *args, **kwargs) + except: + logger.error("Failed analysis on area number: %s" %(area)) + +def run_analysis_pipeline( + filenames, + output=None, + tmax=None, + invert=False, + show=False, + debug=False, + brightchannel=False, + loader='default', + channel=None, + tdim=0, + fluo=None, + fluoresc=False, + batch=False, + area_num = None + #logger=logger, + ): + """Main analysis pipeline + Executes the full analysis pipeline, from input images to output images and + tables. + + Parameters + ---------- + filenames : list of strings + List of files to load (can be single file) + output : String + Name of output file base (default : input file name) + tmax : Integer + Frame to run analysis until (default : all) + invert : Boolean + Whether the image needs to be inverted (default : False) + show : Boolean + Whether to "show" the plots on the screen (default : False) + debug : Boolean + Whether to add debugging outputs (default : False) + brightchannel : Boolean + Whether to detect channel as a bright instead of dark line (default : False) + loader : string (options: tifffile, default) + Which image loader to use (default : default) + channel : Integer + Select channel dimension from multi-channel + image (default : None - single channel image) + tdim : Integer + Select time dimension (default : 0) + fluo : List of strings + List of fluorescence images to load (default : None) + fluoresc : Boolean + Whether image stack contains alternating fluorescence images (default : False) + batch : Boolean + Whether input is a folder containing multiple image areas to + run concurrently (default : False) + area_num : Integer + Area number for dealing with multiple areas (default : None) + """ + #See if the input is a folder or files + filenames = mio.input_folder(filenames) + + label_dict_string = None + + dir_name, output, image_dir = mmeas.find_input_filename(filenames[0], out = output, batch = batch, im_area = area_num) + + logger.debug("Starting run_analysis_pipeline") + logger.debug(" invert:", invert) + logger.debug(" show:", show) + logger.info("Loading data...") + fluo_data = None + + if loader == "tifffile": + try: + import tifffile + except: + loader = "default" + + if loader == "default": + data = mio.load_data(filenames) + if fluo is 
not None: + fluo_data = mio.load_data(fluo) + elif loader == "tifffile": + data = tifffile.imread(filenames).astype(float) + else: + logger.error("Invalid loader specified [%s]" % str(loader)) + return + + logger.debug("Initial load:", data.shape) + if fluoresc: + data, fluo_data = mio.split_fluorescence(data) + + if data.ndim == 2: + data = data[None] + + if fluo is not None or fluoresc is not False: + if fluo_data.ndim ==2: + fluo_data = fluo_data[None] + + if tdim: + data = np.rollaxis(data, tdim) + + if tmax: + data = data[:tmax] + if channel is not None: + data = data[:,channel,...] + if invert: + raise Exception("HANDLE ME BETTER") + if isinstance(data, (list, tuple)): + data = [ d.max() - d for d in data ] + else: + data = data.max() - data + logger.info("Data loaded:", data.shape, "of type", data.dtype) + + logger.info("Detecting channels, wells, and extracting well profiles...") + allwells = [] + allchannels = [] + allridges = [] + allbacteria = [] + figs = [] + lastimage =None + lastframe = None + lastbac = None + final_timepoint = len(data)-1 + for t, d in enumerate(data): + try: + lastframe, channel, ridges, wells, bacteria, label_dict_string, lastimage, lastbac = run_frame_analysis(t,d,fluo_data,label_dict_string,lastframe,final_timepoint,lastbac,lastimage,image_dir,dir_name,output,tmax,invert,show,debug,brightchannel,loader,channel,tdim,fluo,fluoresc,batch,area_num) + allwells.append(wells) + allchannels.append(channel) + allridges.append(ridges) + allbacteria.append(bacteria) + except: + logger.error("Analysis failed for area number: %s timepoint %s" %(area_num,t)) + logger.error("Exception:%s" % traceback.format_exc()) + outp.final_output(dir_name, output, fluo, fluoresc, final_timepoint+1) + return dir_name + +def run_frame_analysis(t,d,fluo_data,label_dict_string,lastframe,final_timepoint,lastbac,lastimage,image_dir,dir_name,output,tmax,invert,show,debug,brightchannel,loader,channel,tdim,fluo,fluoresc,batch,area_num): + + logger.info("T = %d"%t) + tracked = False + if debug: + plt.figure() + plt.imshow(d) + plt.savefig("DEBUG_DATAINL_%0.6d.jpg"%t) + plt.close() + channel = mdet.detect_channel(d, bright=brightchannel) + if debug: + plt.figure() + plt.imshow(channel) + plt.savefig("DEBUG_CHANNEL_%0.6d.jpg"%t) + plt.close() + wells0, ridges = mdet.detect_wells(d) + if debug: + plt.figure() + plt.imshow(wells0) + plt.savefig("DEBUG_WELLS_INITIAL_%0.6d.jpg"%t) + plt.close() + if lastframe is not None: + # Estimate transform + tform = mtrack.frametracker(lastframe, d, debug=debug) + + logger.debug("\tFrame tracking registered a transform of:", str(tform)) + # Update the well labels + # INFO + # at the moment the second output is how many wells + # the frame has shifted - need to check this - + # the value is used in bacteria tracking to allign + # the wells as they aren't labelled + profiles0, wellimg, coords = mdet.extract_well_profiles(d, channel, wells0, debug=debug) + wells = mtrack.welltracking(lastimage, wellimg, tform) + tracked = True + else: + wells = wells0 + lastwells = wells + if debug: + plt.figure() + plt.imshow(wells) + plt.savefig("DEBUG_WELLS_POST_TRACKING_%0.6d.jpg"%t) + plt.close() + + #wells = mdet.detect_wells(d) + profiles0, wellimg, coords = mdet.extract_well_profiles(d, channel, wells, debug=debug, tracked=tracked) + lastimage = wellimg + + if len(profiles0) < 2: + logger.error("Less than 2 wells detected in extract_well_profiles, aborting") + return + + #print("DEBUG: SAVING WELL PROFILES (WITHOUT BG SUBTRACTION") + #import skimage.io as skio + 
#skio.imsave("DEBUG_profiles.tif", np.array(profiles0)) + + profiles = mdet.remove_background(profiles0) + logger.info("\t%d wells extracted..."%(len(profiles))) + if not profiles: + logger.warn("\tNo wells extracted, aborting") + if debug: + plt.figure() + plt.imshow(d, cmap='gray') + plt.title("Data frame") + plt.figure() + plt.imshow(channel) + plt.title("Channels") + plt.figure() + plt.imshow(ridges) + plt.title("Ridges") + plt.figure() + plt.imshow(wells0) + plt.title("Initial wells") + plt.figure() + plt.imshow(wells) + plt.title("Wells after tracking") + plt.figure() + plt.imshow(wellimg) + plt.title("After extracting profiles") + plt.show() + return + + + # Detect bacteria + if lastframe is not None: + bacteria = mdet.detect_bacteria_in_wells(profiles, t, label_dict_string) + bacteria, label_dict_string = mdet.split_bacteria(bacteria, label_dict_string, t) + bacteria, label_dict_string = mtrack.bacteria_tracking(lastbac, bacteria, label_dict_string) + bac_counts = mmeas.count_bacteria_in_wells(bacteria) + total_bac = sum(bac_counts) + logger.info("\t%d bacteria (total)..."%total_bac) + logger.info("\tper well:", str(bac_counts)) + else: + bacteria = mdet.detect_bacteria_in_wells(profiles, t, label_dict_string) + bacteria, label_dict_string = mdet.split_bacteria(bacteria, label_dict_string, t) + #logger.info("\t%d bacteria (total)..."%max((b.max() for b in bacteria))) + bac_counts = mmeas.count_bacteria_in_wells(bacteria) + total_bac = sum(bac_counts) + logger.info("\t%d bacteria (total)..."%total_bac) + logger.info("\tper well:", str(bac_counts)) + + lastbac = bacteria + numberbac = max(b.max() for b in bacteria.values()) #this takes the max label from the previous image + lastframe = d + segfull = np.zeros(wellimg.shape, dtype="int16") + for s, c in zip(bacteria, coords): + segfull[c] = bacteria[s] + # Generate output images + outp.output_figures(d, t, channel, wells, profiles, bacteria, coords, wellimg, ridges, image_dir, label_dict_string) + #Measurements and output + if fluo is None and fluoresc is False: + bac_meas = mmeas.bacteria_measurements(label_dict_string, segfull) + outp.output_csv(bac_meas,dir_name, output, t) + else: + bkground, bg_sem = mmeas.fluorescence_background(wells0,segfull, fluo_data[t])#need to use this in measurements and output + fluo_meas = mmeas.fluorescence_measurements(segfull, bkground, bg_sem, fluo_data[t]) #while writing fluorescence function + bac_meas = mmeas.bacteria_measurements(label_dict_string, segfull, fluo_values=fluo_meas) + outp.output_csv(bac_meas,dir_name, output, t, bg=bkground, fl=True) + return lastframe, channel, ridges, wells, bacteria, label_dict_string, lastimage, lastbac + diff --git a/momanalysis/measurements.py b/momanalysis/measurements.py new file mode 100644 index 0000000..dec7f5b --- /dev/null +++ b/momanalysis/measurements.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +""" +Created on Tue Aug 16 13:17:59 2016 + +@author: as624 +""" +import csv +import scipy.ndimage as ndi +#sys.path.append(os.path.join(os.path.dirname(__file__), '..')) +from skimage.measure import regionprops +import os +import matplotlib.pyplot as plt +import numpy as np +from time import gmtime, strftime +from math import sqrt + + +def find_input_filename(fname, out = None, batch=False, im_area=None, test=False): + """Takes the filename input and splits it into simply the filename + which is then used to name the csv later""" + dir_name = os.path.split(os.path.realpath(fname))[0] + + timestamp = strftime("%d%m%Y_%H%M%S", gmtime()) + + if out is None: + 
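# No explicit output name: derive the results folder from the input file name
+        # (or the area number in batch mode) plus a timestamp, so repeated runs
+        # get distinct output folders.
+ 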
file_name = os.path.splitext(os.path.basename(fname))[0] + if batch == True: + new_folder = "Area"+ im_area + "_results_" + timestamp + else: + new_folder = file_name + "_results_" + timestamp + new_dir = os.path.join(dir_name, new_folder) + else: + file_name = out + new_folder = file_name + "_" + timestamp + new_dir = os.path.join(dir_name, new_folder) + + img = "output_images" + image_dir = os.path.join(new_dir, img) + if test is False: + os.makedirs(new_dir) + os.makedirs(image_dir) + + return new_dir, file_name, image_dir + +def bacteria_measurements(label_dict_string,segfull, fluo_values=None): + """ + Takes a list of labelled images representing individual wells + and returns a string containing the area and length of bacteria + """ + + measurementsArray = [] + lblvalues = label_dict_string.values() + original_lbls = [] + list1 = [] + for lbl in lblvalues: + newlbl = int(float(lbl.split('_')[0])) + original_lbls.append(newlbl) + Nbac = segfull.max() + for bac in range(1, Nbac+1): + region = regionprops(segfull) + for r in (r for r in region if r.label == bac): + A = r.area + L = r.major_axis_length + W = r.minor_axis_length + string_lbl = label_dict_string[bac] + list1 = [string_lbl,A, L, W] + if fluo_values is not None: + fluo, fluo_bk = fluo_values[r.label] + list1 = [string_lbl,A, L, W, fluo, fluo_bk, (fluo_bk*A)] + continue + if not list1 and bac in original_lbls: + string_lbl = str(bac) + list1 = [string_lbl,"-","-","-"] + if fluo_values is not None: + list1 = [string_lbl,"-","-","-","-","-","-"] + elif not list1: + string_lbl = str(bac) + list1 = [string_lbl,"-","-","-"] + if fluo_values is not None: + list1 = [string_lbl,"-","-","-","-","-","-"] + measurementsArray.append(list1) + list1=[] + + return measurementsArray + +def count_bacteria_in_wells(bacteria_labels): + """ + For each well, return the number of bacteria it contains; + NOTE: This works on lists of bacteria labels - use count_bacteria + if working with full size labels of wells and bacteria + """ + numbac2=[] + for n, bac in bacteria_labels.items(): + numbac=ndi.label(bac)[1] + numbac2.append(numbac) + return numbac2 + #return [ndi.label(w>0)[1] for w in bacteria_labels] + +def fluorescence_measurements(labelled_image, bkground, bg_sem, fluo_image): + fluo_values = {} + bg_2sem = bkground + (2*bg_sem) + for region in regionprops(labelled_image): + fluo_1 = [] + fluo = np.mean(fluo_image[labelled_image==region.label]) + if fluo > bg_2sem: + fluobg = fluo-bkground + else: + fluobg = 0 + fluo_1 = [fluo, fluobg] + fluo_values[region.label] = fluo_1 + return fluo_values + +def fluorescence_background(wells,segfull, fluo_image): + allwells = wells + allwells[allwells>0] = 1 + allbac = np.copy(segfull) + allbac[allbac>0] = 1 + background_wells = allwells-allbac + + backg = fluo_image[background_wells>0] + bkground = np.mean(backg) + bg_stdev = np.std(backg) + bg_sem = bg_stdev/(sqrt(len(backg))) + return bkground, bg_sem + + + diff --git a/momanalysis/output.py b/momanalysis/output.py new file mode 100644 index 0000000..6df03cf --- /dev/null +++ b/momanalysis/output.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +""" +Created on Mon Jan 09 09:59:13 2017 + +@author: as624 +""" +import csv +import os +import matplotlib.pyplot as plt +import numpy as np +import itertools + +def output_csv(measurementsArray, dir_name, filename, timepoint,bg=None, fl=False): + """takes the measurements of the bacteria and writes it to a csv""" + outfilename = os.path.join( + dir_name, + '%s_timepoint_%s.csv' %(filename, timepoint) + ) + + with 
open(outfilename, "w", newline='') as f: + """need to work out how we will do it for multiple frames""" + writer = csv.writer(f) + if fl is False: + writer.writerow(["Bacteria Number", "Area", "Length", "Width"]) + else: + writer.writerow(["Background", bg, "","","","",""]) + writer.writerow(["Bacteria Number", "Area", "Length", "Width", "fluorescence", "fluo - background", "fluo sum - background"]) + writer.writerows(measurementsArray) + +def output_figures(d, timeindex, channel, wells, profiles, bacteria, coords, wellimg, ridges, image_dir, label_dict_string): + #FIXME: MISSING DOCSTRING + + #Generate images + figs = [] + #C + plt.figure() #to fix platform specific bug with matplotlib initialisation (same as line above "#plt.switch_backend('qt5agg')") + """if profiles: + figs.append(plt.figure(figsize=(16,12))) + plt.imshow(np.hstack(profiles.values())) + plt.colorbar() + """ + figs.append(plt.figure(figsize=(16,12))) + plt.imshow(d, cmap='gray') + plt.contour(wellimg, levels=[0.5], colors=['y']) + for lab_well in range(1, wellimg.max()+1): + bw = wellimg == lab_well + if not np.any(bw): + continue + pos0 = bw.nonzero() + pos = (np.min(pos0[0]), np.max(pos0[1])) + plt.text(pos[1], pos[0], "%d"%lab_well, color="y") + + # FIXME: LAZY WAY TO DO THIS FOR NOW + segfull = np.zeros(wellimg.shape, dtype="int16") + for s, c in zip(bacteria, coords): + segfull[c] = bacteria[s] + + Nbac = segfull.max() + for lab_bac in range(1, Nbac+1): + bw = segfull == lab_bac + pos0 = bw.nonzero() + if len(pos0[0]) == 0 or len(pos0[1]) == 0: + continue + lab_string = label_dict_string[lab_bac] + pos = (np.min(pos0[0]), np.max(pos0[1])) + col = plt.cm.gist_rainbow((lab_bac/9.1)%1) + plt.contour( bw, levels=[0.5], colors=[col]) + plt.text(pos[1], pos[0], "%s"%lab_string, color=col) + + + figs.append(plt.figure()) + plt.imshow(d, cmap='gray') + plt.imshow((channel>0) + 2*(wells>0) , alpha=0.4) + figs.append(plt.figure()) + plt.imshow(ridges) + + for i, f in enumerate(figs): + f.savefig(os.path.join(image_dir, "wells_%0.6d_%0.6d.jpg"%(timeindex, i))) + plt.close(f) + +def final_output(dir_name, filename, fluo, fluoresc, tmax): + #FIXME: MISSING DOCSTRING + timepoints = list(range(0,tmax)) #create a list of timepoints + rows_csvs = [] + if fluo is None and fluoresc is False: + no_data = ["", "", "", ""] #if there is no data at a given timepoint this will be written + time_row = ["",""] + else: + no_data = ["", "", "", "", "", "", ""] + time_row = ["","","","",""] + time_row2 = [] + for t in timepoints: + timestamp = ["Time =", t] + time_row2.extend(timestamp) + time_row2.extend(time_row) + individual_csv_filename = os.path.join( + dir_name, + '%s_timepoint_%s.csv' %(filename, t) + ) + try: + with open(individual_csv_filename, "r",newline='') as f: + reader2 = list(csv.reader(f)) + rows_csvs.append(len(reader2)) #find the length of all the individual timepoint csvs + except: + rows_csvs.append(int(0)) + max_rows = max(rows_csvs) #find the maximum length + + combined_csv_filename = os.path.join( + dir_name, + 'Combined_Output.csv', + ) + with open(combined_csv_filename, "w", newline='') as o: + writer = csv.writer(o) + writer.writerow(time_row2) + for r in range(0,max_rows): + current_row = [] + for t in timepoints: + individual_csv_filename = os.path.join( + dir_name, + '%s_timepoint_%s.csv' %(filename, t) + ) + try: + with open(individual_csv_filename, "r",newline='') as f: + num_rows = rows_csvs[t] #find the number of rows for this timepoint + if r < num_rows: #if the row exists extract it + temp_row = 
next(itertools.islice(csv.reader(f), r, None)) + current_row.extend(temp_row) #create one long list of values + else: #if row is empty + current_row.extend(no_data) + except: + current_row.extend(no_data) + writer.writerow(current_row) #write the new row diff --git a/momanalysis/plot.py b/momanalysis/plot.py new file mode 100644 index 0000000..7a43847 --- /dev/null +++ b/momanalysis/plot.py @@ -0,0 +1,51 @@ +# +# FILE : plot.py +# CREATED : 22/09/16 13:24:16 +# AUTHOR : J. Metz +# DESCRIPTION : Main plotting functions +# + +import matplotlib.pyplot as plt +import matplotlib.animation as animation + +def view_stack(data): + """ + Wrapper around matplotlib to create a simple data viewer + """ + + fig = plt.figure() + ax = fig.add_subplot(111) + im = ax.imshow(data[0], cmap='gray') + im.TMAX = len(data) + im.tnow = 0 + im.pause = False + title = plt.title("Frame %d"%im.tnow) + + def prevframe(): + im.tnow = (im.tnow-1)%im.TMAX + im.set_data(data[im.tnow]) + title.set_text("Frame %d"%im.tnow) + fig.canvas.draw() + def nextframe(stuff=None): + if im.pause and (stuff is not None): + return + im.tnow = (im.tnow+1)%im.TMAX + im.set_data(data[im.tnow]) + title.set_text("Frame %d"%im.tnow) + fig.canvas.draw() + + + def press(event): + if event.key == "left": + prevframe() + elif event.key == "right": + nextframe() + elif event.key == " ": + im.pause ^= True + else: + print("Unbound key pressed:", event.key) + + fig.canvas.mpl_connect('key_press_event', press) + ani = animation.FuncAnimation(fig, nextframe, blit=False, interval=10, repeat=True) + + plt.show() diff --git a/momanalysis/skimage_future.py b/momanalysis/skimage_future.py new file mode 100644 index 0000000..72cb317 --- /dev/null +++ b/momanalysis/skimage_future.py @@ -0,0 +1,196 @@ +# +# FILE : skimage_future.py +# CREATED : 27/09/16 13:40:48 +# AUTHOR : J. Metz +# DESCRIPTION : Skimage functions that are unavailable in stable +# release but present in development version +# + +import numpy as np +from skimage.feature import hessian_matrix, hessian_matrix_eigvals + + +def _frangi_hessian_common_filter(image, scale_range, scale_step, + beta1, beta2): + """This is an intermediate function for Frangi and Hessian filters. + Shares the common code for Frangi and Hessian functions. + Parameters + ---------- + image : (N, M) ndarray + Array with input image data. + scale_range : 2-tuple of floats, optional + The range of sigmas used. + scale_step : float, optional + Step size between sigmas. + beta1 : float, optional + Frangi correction constant. + beta2 : float, optional + Frangi correction constant. + Returns + ------- + filtered_list : list + List of pre-filtered images. 
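+    lambdas_list : list
+        Corresponding (scale-corrected) lambda1 eigenvalue images for each
+        scale; `frangi` below uses their sign to keep black or white ridges.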
+ """ + # Import has to be here due to circular import error + + sigmas = np.arange(scale_range[0], scale_range[1], scale_step) + if np.any(np.asarray(sigmas) < 0.0): + raise ValueError("Sigma values less than zero are not valid") + + beta1 = 2 * beta1 ** 2 # `ad` in imagej code L137 + beta2 = 2 * beta2 ** 2 # `bd` in imagej code L138 + + filtered_array = np.zeros(sigmas.shape + image.shape) + lambdas_array = np.zeros(sigmas.shape + image.shape) + + # Filtering for all sigmas + for i, sigma in enumerate(sigmas): + # Make 2D hessian + # Whole routine for this in imagej L158-1282, but pre-calculates + # gaussian-blurring, here thats part of hessian_matrix + (Dxx, Dxy, Dyy) = hessian_matrix(image, sigma) + + + # Correct for scale + Dxx = (sigma ** 2) * Dxx + Dxy = (sigma ** 2) * Dxy + Dyy = (sigma ** 2) * Dyy + + # Calculate (abs sorted) eigenvalues and vectors + # NOTE: According to _hessian_matrix_det code, this is only valid for sigma >= 3!! + (lambda1, lambda2) = hessian_matrix_eigvals(Dxx, Dxy, Dyy) + + # Compute some similarity measures + lambda1[lambda1 == 0] = 1e-10 + rb = (lambda2 / lambda1) ** 2 + s2 = lambda1 ** 2 + lambda2 ** 2 + + # Compute the output image + filtered = np.exp(-rb / beta1) * (np.ones(np.shape(image)) - + np.exp(-s2 / beta2)) + + # Store the results in 3D matrices + filtered_array[i] = filtered + lambdas_array[i] = lambda1 + return filtered_array, lambdas_array + + +def frangi(image, scale_range=(1, 10), scale_step=2, beta1=0.5, beta2=15, + black_ridges=True): + """Filter an image with the Frangi filter. + This filter can be used to detect continuous edges, e.g. vessels, + wrinkles, rivers. It can be used to calculate the fraction of the + whole image containing such objects. + Calculates the eigenvectors of the Hessian to compute the similarity of + an image region to vessels, according to the method described in _[1]. + Parameters + ---------- + image : (N, M) ndarray + Array with input image data. + scale_range : 2-tuple of floats, optional + The range of sigmas used. + scale_step : float, optional + Step size between sigmas. + beta1 : float, optional + Frangi correction constant. + beta2 : float, optional + Frangi correction constant. + black_ridges : boolean, optional + When True (the default), the filter detects black ridges; when + False, it detects white ridges. + Returns + ------- + out : (N, M) ndarray + Filtered image (maximum of pixels across all scales). + Notes + ----- + Written by Marc Schrijver, 2/11/2001 + Re-Written by D. J. Kroon University of Twente (May 2009) + References + ---------- + .. [1] A. Frangi, W. Niessen, K. Vincken, and M. Viergever. "Multiscale + vessel enhancement filtering," In LNCS, vol. 1496, pages 130-137, + Germany, 1998. Springer-Verlag. + .. [2] Kroon, D.J.: Hessian based Frangi vesselness filter. + .. [3] http://mplab.ucsd.edu/tutorials/gabor.pdf. + """ + filtered, lambdas = _frangi_hessian_common_filter(image, + scale_range, scale_step, + beta1, beta2) + if black_ridges: + filtered[lambdas < 0] = 0 + else: + filtered[lambdas > 0] = 0 + + # Return for every pixel the value of the scale(sigma) with the maximum + # output pixel value + return np.max(filtered, axis=0) + + +def threshold_li(image): + """Return threshold value based on adaptation of Li's Minimum Cross Entropy method. + + Parameters + ---------- + image : array + Input image. + + Returns + ------- + threshold : float + Upper threshold value. All pixels intensities more than + this value are assumed to be foreground. + + References + ---------- + .. [1] Li C.H. 
and Lee C.K. (1993) "Minimum Cross Entropy Thresholding" + Pattern Recognition, 26(4): 617-625 + .. [2] Li C.H. and Tam P.K.S. (1998) "An Iterative Algorithm for Minimum + Cross Entropy Thresholding" Pattern Recognition Letters, 18(8): 771-776 + .. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding + Techniques and Quantitative Performance Evaluation" Journal of + Electronic Imaging, 13(1): 146-165 + http://citeseer.ist.psu.edu/sezgin04survey.html + .. [4] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold + + Examples + -------- + >>> from skimage.data import camera + >>> image = camera() + >>> thresh = threshold_li(image) + >>> binary = image > thresh + """ + # Copy to ensure input image is not modified + image = image.copy() + # Requires positive image (because of log(mean)) + immin = np.min(image) + image -= immin + imrange = np.max(image) + tolerance = 0.5 * imrange / 256 + + # Calculate the mean gray-level + mean = np.mean(image) + + # Initial estimate + new_thresh = mean + old_thresh = new_thresh + 2 * tolerance + + # In case while starts off false + threshold = new_thresh + # Stop the iterations when the difference between the + # new and old threshold values is less than the tolerance + while abs(new_thresh - old_thresh) > tolerance: + old_thresh = new_thresh + threshold = old_thresh + tolerance # range + # Calculate the means of background and object pixels + mean_back = image[image <= threshold].mean() + mean_obj = image[image > threshold].mean() + + temp = (mean_back - mean_obj) / (np.log(mean_back) - np.log(mean_obj)) + + if temp < 0: + new_thresh = temp - tolerance + else: + new_thresh = temp + tolerance + + return threshold + immin diff --git a/momanalysis/tests/Thumbs.db b/momanalysis/tests/Thumbs.db new file mode 100644 index 0000000..05d29f6 Binary files /dev/null and b/momanalysis/tests/Thumbs.db differ diff --git a/momanalysis/tests/__init__.py b/momanalysis/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/momanalysis/tests/__main__.py b/momanalysis/tests/__main__.py new file mode 100644 index 0000000..7e6cd86 --- /dev/null +++ b/momanalysis/tests/__main__.py @@ -0,0 +1,20 @@ +# +# FILE : __main__.py +# CREATED : 08/11/16 15:14:01 +# AUTHOR : J. 
Metz +# DESCRIPTION : Testing - with options +# + + + +# By default run standard test discovery, but add a call to matplotlib.pyplot.show +import matplotlib.pyplot as plt +import sys +from unittest import main + +if __name__ == '__main__': + sys.path.append("..") + + #unittest.TestLoader + main(module=None, exit=False, verbosity=2) + plt.show() diff --git a/momanalysis/tests/__pycache__/__init__.cpython-36.pyc b/momanalysis/tests/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..7c20b0f Binary files /dev/null and b/momanalysis/tests/__pycache__/__init__.cpython-36.pyc differ diff --git a/momanalysis/tests/__pycache__/__main__.cpython-36.pyc b/momanalysis/tests/__pycache__/__main__.cpython-36.pyc new file mode 100644 index 0000000..9b3b186 Binary files /dev/null and b/momanalysis/tests/__pycache__/__main__.cpython-36.pyc differ diff --git a/momanalysis/tests/__pycache__/core.cpython-36.pyc b/momanalysis/tests/__pycache__/core.cpython-36.pyc new file mode 100644 index 0000000..64e409b Binary files /dev/null and b/momanalysis/tests/__pycache__/core.cpython-36.pyc differ diff --git a/momanalysis/tests/__pycache__/test_detection.cpython-36-PYTEST.pyc b/momanalysis/tests/__pycache__/test_detection.cpython-36-PYTEST.pyc new file mode 100644 index 0000000..e2a274d Binary files /dev/null and b/momanalysis/tests/__pycache__/test_detection.cpython-36-PYTEST.pyc differ diff --git a/momanalysis/tests/__pycache__/test_detection.cpython-36.pyc b/momanalysis/tests/__pycache__/test_detection.cpython-36.pyc new file mode 100644 index 0000000..ecebdce Binary files /dev/null and b/momanalysis/tests/__pycache__/test_detection.cpython-36.pyc differ diff --git a/momanalysis/tests/__pycache__/test_gui.cpython-36-PYTEST.pyc b/momanalysis/tests/__pycache__/test_gui.cpython-36-PYTEST.pyc new file mode 100644 index 0000000..49982fc Binary files /dev/null and b/momanalysis/tests/__pycache__/test_gui.cpython-36-PYTEST.pyc differ diff --git a/momanalysis/tests/__pycache__/test_gui.cpython-36.pyc b/momanalysis/tests/__pycache__/test_gui.cpython-36.pyc new file mode 100644 index 0000000..0a84e81 Binary files /dev/null and b/momanalysis/tests/__pycache__/test_gui.cpython-36.pyc differ diff --git a/momanalysis/tests/__pycache__/test_io.cpython-36-PYTEST.pyc b/momanalysis/tests/__pycache__/test_io.cpython-36-PYTEST.pyc new file mode 100644 index 0000000..eecbdde Binary files /dev/null and b/momanalysis/tests/__pycache__/test_io.cpython-36-PYTEST.pyc differ diff --git a/momanalysis/tests/__pycache__/test_io.cpython-36.pyc b/momanalysis/tests/__pycache__/test_io.cpython-36.pyc new file mode 100644 index 0000000..b7e3217 Binary files /dev/null and b/momanalysis/tests/__pycache__/test_io.cpython-36.pyc differ diff --git a/momanalysis/tests/__pycache__/test_main.cpython-36.pyc b/momanalysis/tests/__pycache__/test_main.cpython-36.pyc new file mode 100644 index 0000000..0c80fd9 Binary files /dev/null and b/momanalysis/tests/__pycache__/test_main.cpython-36.pyc differ diff --git a/momanalysis/tests/__pycache__/test_measurements.cpython-36-PYTEST.pyc b/momanalysis/tests/__pycache__/test_measurements.cpython-36-PYTEST.pyc new file mode 100644 index 0000000..5756e06 Binary files /dev/null and b/momanalysis/tests/__pycache__/test_measurements.cpython-36-PYTEST.pyc differ diff --git a/momanalysis/tests/__pycache__/test_measurements.cpython-36.pyc b/momanalysis/tests/__pycache__/test_measurements.cpython-36.pyc new file mode 100644 index 0000000..a4a03de Binary files /dev/null and 
b/momanalysis/tests/__pycache__/test_measurements.cpython-36.pyc differ diff --git a/momanalysis/tests/__pycache__/test_output.cpython-36-PYTEST.pyc b/momanalysis/tests/__pycache__/test_output.cpython-36-PYTEST.pyc new file mode 100644 index 0000000..21fe35b Binary files /dev/null and b/momanalysis/tests/__pycache__/test_output.cpython-36-PYTEST.pyc differ diff --git a/momanalysis/tests/__pycache__/test_output.cpython-36.pyc b/momanalysis/tests/__pycache__/test_output.cpython-36.pyc new file mode 100644 index 0000000..6104ddb Binary files /dev/null and b/momanalysis/tests/__pycache__/test_output.cpython-36.pyc differ diff --git a/momanalysis/tests/__pycache__/test_plot.cpython-36.pyc b/momanalysis/tests/__pycache__/test_plot.cpython-36.pyc new file mode 100644 index 0000000..dd06b5e Binary files /dev/null and b/momanalysis/tests/__pycache__/test_plot.cpython-36.pyc differ diff --git a/momanalysis/tests/__pycache__/test_tracking.cpython-36-PYTEST.pyc b/momanalysis/tests/__pycache__/test_tracking.cpython-36-PYTEST.pyc new file mode 100644 index 0000000..83159ff Binary files /dev/null and b/momanalysis/tests/__pycache__/test_tracking.cpython-36-PYTEST.pyc differ diff --git a/momanalysis/tests/__pycache__/test_tracking.cpython-36.pyc b/momanalysis/tests/__pycache__/test_tracking.cpython-36.pyc new file mode 100644 index 0000000..8241a27 Binary files /dev/null and b/momanalysis/tests/__pycache__/test_tracking.cpython-36.pyc differ diff --git a/momanalysis/tests/__pycache__/test_utility.cpython-36.pyc b/momanalysis/tests/__pycache__/test_utility.cpython-36.pyc new file mode 100644 index 0000000..f583439 Binary files /dev/null and b/momanalysis/tests/__pycache__/test_utility.cpython-36.pyc differ diff --git a/momanalysis/tests/core.py b/momanalysis/tests/core.py new file mode 100644 index 0000000..e165471 --- /dev/null +++ b/momanalysis/tests/core.py @@ -0,0 +1,30 @@ + + + +import matplotlib.pyplot as plt +import numpy as np +import sys + +def assert_array_equal(a, b): + if not np.array_equal(a,b): + fname = sys._getframe().f_back.f_code.co_name + name = fname + plt.matshow(a) + plt.title("%s : Array 1" % name) + plt.matshow(b) + plt.title("%s : Array 2" % name) + np.testing.assert_array_equal(a,b) + + +def assert_array_almost_equal(a, b): + try: + np.testing.assert_array_almost_equal(a,b) + except: + fname = sys._getframe().f_back.f_code.co_name + name = fname + plt.matshow(a) + plt.title("%s : Array 1" % name) + plt.matshow(b) + plt.title("%s : Array 2" % name) + #np.testing.assert_array_almost_equal(a,b) + raise diff --git a/momanalysis/tests/test_detection.py b/momanalysis/tests/test_detection.py new file mode 100644 index 0000000..2f630d9 --- /dev/null +++ b/momanalysis/tests/test_detection.py @@ -0,0 +1,1014 @@ +# -*- coding: utf-8 -*- +""" +Created on Fri Nov 1 11:35 2016 + +@author: as624 +""" + +from momanalysis.detection import detect_bacteria_in_wells as detbac +import momanalysis.detection as mdet +from momanalysis.tests import core +import numpy as np +import unittest +import scipy.ndimage as ndi +import skimage.measure as skmeas + +class TestSubtractBackground(unittest.TestCase): + def setUp(self): + self.sz = (100,100) + self.ground_truth = np.zeros(self.sz) + self.ground_truth_level = 400 + # Add some objects + self.ground_truth[10:20, 50:60] = self.ground_truth_level + # Noisy background + self.bg_std = 10 + self.bg_offset = 100 + self.bg_grad_max = 100 + self.bg = self.bg_std*np.random.randn(*self.sz) + self.bg_offset + # Add a constant gradient + X,Y = 
np.meshgrid(np.arange(self.sz[0]), np.arange(self.sz[1])) + self.bg += self.bg_grad_max * X/X.max() + self.image = {0:self.ground_truth + self.bg} + + def test_subtract_background(self): + removed = mdet.remove_background(self.image, light_background=False) + # For our current workflow, the background-removed images are inverted + removed = -removed[0] + + #import matplotlib.pyplot as plt + #plt.imshow(removed) + #plt.colorbar() + #plt.show() + # Make sure the background is all relatively low now + # NOTE: As background is subtracted, need to go double above reasonable + # statisitcally realy unlikely values of ~4 sigma from normal distribution + self.assertTrue(np.all(removed[self.ground_truth == 0] < 8*self.bg_std)) + # Make sure the foreground is about right + self.assertTrue(np.all(np.abs(removed[self.ground_truth > 0] - self.ground_truth_level) < 8*self.bg_std)) + +class DetectBacteria(unittest.TestCase): + + def setUp(self): + self.lbl1 = {1:np.array([[1,1,1,1,1,1,1,1,1,1], + [1,1,1,1,1,1,1,1,1,1], + [1,200,200,200,1,1,1,1,1,1], + [1,200,200,200,1,1,1,1,1,1], + [1,200,200,200,1,1,1,1,1,1], + [1,200,200,200,1,1,1,1,1,1], + [1,1,1,1,1,1,1,1,1,1], + [1,1,1,1,1,1,1,1,1,1], + [1,1,1,1,1,1,1,1,1,1]])} + + + self.lbl2 = {2:np.array([[1,1,1,1,1,1,1,1,1,1], + [1,1,1,1,1,1,1,1,1,1], + [1,1,1,1,1,1,1,1,1,1], + [1,1,1,1,200,200,200,200,1,1], + [1,1,1,1,200,200,200,200,1,1], + [1,1,1,1,200,200,200,200,1,1], + [1,1,1,1,200,200,200,200,1,1], + [1,1,1,1,200,200,200,2,1,1], + [1,1,1,1,1,1,1,1,1,1]])} + + + self.lbl_twobac1 = {3:np.array( + [[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + [ 1,200,200,200, 1, 1, 1, 1, 1, 1], + [ 1,200,200,200, 1, 1, 1, 1, 1, 1], + [ 1,200,200,200, 1, 1, 1, 180, 180, 180], + [ 1,200,200,200, 1, 1, 1, 180, 180, 180], + [ 1, 1, 1, 1, 1, 1, 1,180,180,180], + [ 1, 1, 1, 1, 1, 1, 1,180,180,180], + [ 1, 1, 1, 1, 1, 1, 1,1,1,1]])} + + + + #self.lbl2 = np.array([[0,0,0,0,0,0,0,0,0,0], + # [0,0,0,0,0,0,0,0,0,0], + # [0,1,1,1,0,0,0,0,0,0], + # [0,1,1,1,0,0,0,0,0,0], + # [0,1,1,1,0,0,0,0,0,0], + # [0,1,1,1,0,0,0,0,0,0], + # [0,0,0,0,0,0,0,0,0,0], + # [0,0,0,0,0,0,0,0,0,0], + # [0,0,0,0,0,0,0,0,0,0]]) + + # Seems to now remove a border.... 
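+        # (i.e. each expected label below is the corresponding 200/180 block from
+        # the inputs above shrunk by one pixel on every side, with detected objects
+        # relabelled 1, 2, ... within each well)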
+ self.res1 = [np.array([[0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,1,0,0,0,0,0,0,0], + [0,0,1,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0]]),] + + self.res2 = [np.array([[0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,1,1,0,0,0], + [0,0,0,0,0,1,1,0,0,0], + [0,0,0,0,0,1,1,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0]]),] + + self.res_two1 = [np.array( + [[0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,1,0,0,0,0,0,0,0], + [0,0,1,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,2,0], + [0,0,0,0,0,0,0,0,2,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0]]),] + + self.wellnum1 = [1] + self.wellnum2 = [2] + self.wellnum3 = [3] + self.label_string = {1:'1'} + self.label_string2 = {1:'1',2:'2'} + def test_detect_small_bacteria1(self): + detected = detbac(self.lbl1, + timepoint = 0, + label_dict_string = None, #dictionary of string labels + maxsize = 1500, # maximum area (in pixels) of an object to be considered a bacteria + minsize = 0, # maximum area (in pixels) of an object to be considered a bacteria + absolwidth = 1, #width (in pixels) at which something is definitely a bacteria (can override relativewidth) + distfrombottom = 0, #ignores anything labeled this distance from the bottom of the well (prevents channel border being labelled) + topborder = 0, # Distance to exclude from top due to "shadow" + toprow = 0,#number of pixels along the top row for a label to be discarded + thresh_perc =1) + newarrays1 =[] + wellnums1 = [] + for j, k in detected.items(): + wellnums1.append(j) + newarrays1.append(k) + np.testing.assert_array_equal(newarrays1, self.res1) + self.assertEqual(self.wellnum1, wellnums1) + + def test_detect_small_bacteria2(self): + detected = detbac(self.lbl2, + timepoint = 0, + label_dict_string = None, #dictionary of string labels + maxsize = 1500, # maximum area (in pixels) of an object to be considered a bacteria + minsize = 0, # maximum area (in pixels) of an object to be considered a bacteria + absolwidth = 1, #width (in pixels) at which something is definitely a bacteria (can override relativewidth) + distfrombottom = 0, #ignores anything labeled this distance from the bottom of the well (prevents channel border being labelled) + topborder = 0, # Distance to exclude from top due to "shadow" + toprow = 0,#number of pixels along the top row for a label to be discarded + thresh_perc =1) + newarrays2 =[] + wellnums2 = [] + for j, k in detected.items(): + wellnums2.append(j) + newarrays2.append(k) + np.testing.assert_array_equal(newarrays2, self.res2) + self.assertEqual(self.wellnum2, wellnums2) + + def test_detect_two_bacteria1(self): + detected = detbac(self.lbl_twobac1, + timepoint = 0, + label_dict_string = None, #dictionary of string labels + maxsize = 1500, # maximum area (in pixels) of an object to be considered a bacteria + minsize = 0, # maximum area (in pixels) of an object to be considered a bacteria + absolwidth = 1, #width (in pixels) at which something is definitely a bacteria (can override relativewidth) + distfrombottom = 0, #ignores anything labeled this distance from the bottom of the well (prevents channel border being labelled) + topborder = 0, # Distance to exclude from top due to "shadow" + toprow = 0,#number of pixels along the top row for a label to be discarded + thresh_perc =1) + newarrays3 =[] + wellnums3 = [] + for j, k in detected.items(): + 
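# detbac returns a {well_number: labelled_array} dict; unpack it so the well
+            # ids and label arrays can be checked against the fixtures above
+ 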
wellnums3.append(j) + newarrays3.append(k) + np.testing.assert_array_equal(newarrays3, self.res_two1) + self.assertEqual(self.wellnum3, wellnums3) + +class TestSplitBacteria(unittest.TestCase): + def setUp(self): + self.wells = {1: + np.array([[0,0,0,0,1,1,0,0,0,0], + [0,0,0,1,1,1,1,0,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,0,1,1,1,1,0,0,0], + [0,0,0,0,1,1,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,2,2,0,0,0,0], + [0,0,0,2,2,2,2,0,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,0,2,2,2,2,0,0,0], + [0,0,0,0,2,2,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0]])} + self.wells2 = {2: + np.array([[0,0,0,0,1,1,0,0,0,0], + [0,0,0,1,1,1,1,0,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,0,1,1,1,1,0,0,0], + [0,0,0,1,1,1,1,0,0,0], + [0,0,0,0,1,1,0,0,0,0], + [0,0,0,0,1,1,0,0,0,0], + [0,0,0,1,1,1,1,0,0,0], + [0,0,0,1,1,1,1,0,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,0,1,1,1,1,0,0,0], + [0,0,0,0,1,1,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,2,2,0,0,0,0], + [0,0,0,2,2,2,2,0,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,0,2,2,2,2,0,0,0], + [0,0,0,0,2,2,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0]]) + } + + self.out_wells = [np.array([[0,0,0,0,1,1,0,0,0,0], + [0,0,0,1,1,1,1,0,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,0,1,1,1,1,0,0,0], + [0,0,0,0,1,1,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,2,2,0,0,0,0], + [0,0,0,2,2,2,2,0,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,0,2,2,2,2,0,0,0], + 
[0,0,0,0,2,2,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0]]),] + + self.out_wells2 = [np.array([[0,0,0,0,0,0,0,0,0,0], + [0,0,0,1,1,1,0,0,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,1,1,1,1,1,1,0,0], + [0,0,0,1,1,1,0,0,0,0], + [0,0,0,0,1,1,1,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,2,2,2,0,0,0,0], + [0,0,0,0,2,2,2,0,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,2,2,2,2,2,2,0,0], + [0,0,0,2,2,2,2,0,0,0], + [0,0,0,0,2,2,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,3,3,0,0,0,0], + [0,0,0,3,3,3,3,0,0,0], + [0,0,3,3,3,3,3,3,0,0], + [0,0,3,3,3,3,3,3,0,0], + [0,0,3,3,3,3,3,3,0,0], + [0,0,3,3,3,3,3,3,0,0], + [0,0,3,3,3,3,3,3,0,0], + [0,0,3,3,3,3,3,3,0,0], + [0,0,3,3,3,3,3,3,0,0], + [0,0,3,3,3,3,3,3,0,0], + [0,0,3,3,3,3,3,3,0,0], + [0,0,3,3,3,3,3,3,0,0], + [0,0,0,3,3,3,3,0,0,0], + [0,0,0,0,3,3,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0]]),] + + self.label_dict_string = {} + self.out_string = {1:'1',2:'2'} + self.out_string2 = {1:'1',2:'2',3:'3'} + self.wellnum1 = [1] + self.wellnum2 = [2] + + def test_bacteria_no_split(self): + split, label_dict = mdet.split_bacteria(self.wells, self.label_dict_string, timepoint=0,distfrombottom = -1, absolwidth=0) + newarrays1 =[] + wellnums1 = [] + for j, k in split.items(): + wellnums1.append(j) + newarrays1.append(k) + np.testing.assert_array_equal(newarrays1, self.out_wells) + self.assertEqual(self.wellnum1, wellnums1) + self.assertEqual(self.out_string, label_dict) + + def test_bacteria_split(self): + split, label_dict = mdet.split_bacteria(self.wells2, self.label_dict_string, timepoint=0,distfrombottom = -1, absolwidth=0) + newarrays2 =[] + wellnums2 = [] + for j, k in split.items(): + wellnums2.append(j) + newarrays2.append(k) + np.testing.assert_array_equal(newarrays2, self.out_wells2) + self.assertEqual(self.wellnum2, wellnums2) + self.assertEqual(self.out_string2, label_dict) + +class TestExtractWells(unittest.TestCase): + def setUp(self): + # Create test data for the extraction + # Doesn't really matter what the image is + self.image = np.random.rand(10,10) + self.channel = np.array([ + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,1,1,1,1,1,1,1,1,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + ], dtype=bool) + self.wells = np.array([ + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,2,0,0,0,0,0,], + [0,0,1,0,2,0,0,0,3,0,], + [0,0,1,0,2,0,0,0,3,0,], + [0,0,1,0,2,0,0,0,3,0,], + [0,0,1,0,0,0,0,0,3,0,], + [0,0,0,0,0,0,0,0,0,0,], + 
[0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + ], dtype='uint16') + self.wellstrue = np.array([ + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,1,0,2,0,3,0,4,0,], + [0,0,1,0,2,0,3,0,4,0,], + [0,0,1,0,2,0,3,0,4,0,], + [0,0,1,0,2,0,3,0,4,0,], + [0,0,1,0,2,0,3,0,4,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + ], dtype='uint16') + self.wellwidth = 1 + self.coords = [ + ([6,5,4,3,2], [2,2,2,2,2]), + ([6,5,4,3,2], [4,4,4,4,4]), + ([6,5,4,3,2], [6,6,6,6,6]), + ([6,5,4,3,2], [8,8,8,8,8]), + ] + + self.wellimages = { l:self.image[c][:,None] + for l,c in enumerate(self.coords, start=1) + } + #self.wellimages = + #self.coords = + + def test_extract_well_profiles(self): + + images, wellimage, coords = mdet.extract_well_profiles( + self.image, + self.channel, + self.wells, + wellwidth = self.wellwidth, + #debug=True, + min_well_sep_factor=0.5, + ) + + self.assertEqual(len(images), len(self.wellimages)) + for w1, w2 in zip(images, self.wellimages): + #np.testing.assert_array_equal(w1, w2) + core.assert_array_almost_equal(w1, w2) + np.testing.assert_array_equal(wellimage, self.wellstrue) + self.assertEqual(coords, self.coords) + +class TestDetectWells(unittest.TestCase): + def setUp(self): + # Create test data for the detection + self.image = 10 + np.random.randn(10,10) + self.image += 20*np.array([ + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,1,1,0,0,1,1,0,0,], + [0,0,1,1,0,0,1,1,0,0,], + [0,0,1,1,0,0,1,1,0,0,], + [0,0,1,1,0,0,1,1,0,0,], + [0,0,1,1,0,0,1,1,0,0,], + [0,0,1,1,0,0,1,1,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + ]) + self.scale_range = [1.0, 3.0] + self.maxd = 20 + self.mind = 3 + self.maxperp = 3 + self.minwidth=0 + self.lbl = ndi.label( + np.array([ + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,1,1,0,0,1,1,0,0,], + [0,0,1,1,0,0,1,1,0,0,], + [0,0,1,1,0,0,1,1,0,0,], + [0,0,1,1,0,0,1,1,0,0,], + [0,0,1,1,0,0,1,1,0,0,], + [0,0,1,1,0,0,1,1,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + ]))[0] + + def test_detect_wells(self): + lblgood, ridges = mdet.detect_wells( + self.image, + scale_range = self.scale_range, + maxd = self.maxd, + mind = self.mind, + maxperp = self.maxperp, + minwidth = self.minwidth, + ) + core.assert_array_equal(lblgood, self.lbl) + + +class TestDetectChannel(unittest.TestCase): + def setUp(self): + # Create test data for the extraction + # REALLY SMALL, DUMMY VERSION + im_noise = 100 + np.random.randn(10,10) + self.image_bright = im_noise + 20*np.array([ + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,1,0,0,1,0,0,0,], + [0,0,0,1,0,0,1,0,0,0,], + [0,0,0,1,0,0,1,0,0,0,], + [0,0,0,1,0,0,1,0,0,0,], + [0,3,3,3,3,3,3,3,3,0,], + [0,3,3,3,3,3,3,3,3,0,], + [0,3,3,3,3,3,3,3,3,0,], + [0,0,0,0,0,0,0,0,0,0,], + ]) + self.image_dark = im_noise + 20*np.array([ + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,1,0,0,1,0,0,0,], + [0,0,0,1,0,0,1,0,0,0,], + [0,0,0,1,0,0,1,0,0,0,], + [0,0,0,1,0,0,1,0,0,0,], + [0,-3,-3,-3,-3,-3,-3,-3,-3,0,], + [0,-3,-3,-3,-3,-3,-3,-3,-3,0,], + [0,-3,-3,-3,-3,-3,-3,-3,-3,0,], + [0,0,0,0,0,0,0,0,0,0,], + ]) + self.scale_range = [1.0, 2.0] + self.channel = np.array([ + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,1,1,1,1,1,1,1,1,0,], + [0,1,1,1,1,1,1,1,1,0,], + [0,1,1,1,1,1,1,1,1,0,], + [0,0,0,0,0,0,0,0,0,0,], + ], dtype=bool) + + # + # MORE REALISTIC CHANNEL + #im_noise = 150 + np.random.randn(200,1000) + 
#self.image_bright = im_noise.copy() + ## Add in wells + #well_y0 = 20 + #well_y1 = 120 + #well_width = 5 + #chan_x0 = 10 + #chan_x1 = 1000-10 + #chan_y0 = 5 + #chan_y1 = 20 # I.e. width of 15 + #for left in range(50, 1000, 100): # 50, 150, 250, ... 950 + # self.image_bright[well_y0:well_y1, left:left+well_width] += 20 + # self.image_dark[well_y0:well_y1, left:left+well_width] += 20 + ## Add in channel + #self.image_bright[chan_y0:chan_y1, chan_x0:chanx1] += 20*3 + #self.image_dark[chan_y0:chan_y1, chan_x0:chanx1] -= 20*3 + + self.orientation = 0 + + def test_detect_channel_bright(self): + channel = mdet.detect_channel( + self.image_bright, + scale_range = self.scale_range, + minwidth=0, + bright=True, + ) + # This is a little messier than wells; let's allow + # for the detected channel to be a little smaller... + # make sure it's contained by the expected + try: + self.assertEqual( np.sum(self.channel[channel])/channel.sum(), 1.0 ) + except: + print("COVERAGE", np.sum(self.channel[channel])/channel.sum()) + raise + + # Make sure what's missed is less than 50% + areafull = self.channel.sum() + areamissed = self.channel[~channel].sum() + try: + self.assertTrue( areamissed/areafull <= 0.5) + except: + print("AREA MISSED",areamissed/areafull) + core.plt.matshow(channel) + core.plt.matshow(self.channel) + raise + + # Lastly, make sure it's going to give us basically the + # same important properties! + prop = skmeas.regionprops(channel.astype(int))[0] + try: + self.assertTrue(np.abs(prop.orientation - self.orientation) + < (10*np.pi/180)) + except: + print("ANGLES", prop.orientation, self.orientation) + raise + + + def test_detect_channel_dark(self): + channel = mdet.detect_channel( + self.image_dark, + scale_range = self.scale_range, + bright = False, + minwidth=0, + ) + #core.assert_array_equal(channel, self.channel) + # This is a little messier than wells; let's allow + # for the detected channel to be a little smaller... + + # make sure it's contained by the expected + try: + self.assertEqual( np.sum(self.channel[channel])/channel.sum(), 1.0 ) + except: + import matplotlib.pyplot as plt + plt.figure() + plt.imshow(self.image_dark, cmap='gray') + plt.contour(self.channel, levels=[0.5], colors=["g"]) + plt.contour(channel, levels=[0.5], colors=["r"]) + plt.savefig("/tmp/momanalysis_debug_test_detect_channel_dark.png") + plt.close() + print("COVERAGE", np.sum(self.channel[channel])/channel.sum()) + raise + + # Make sure what's missed is less than 50% + areafull = self.channel.sum() + areamissed = self.channel[~channel].sum() + try: + self.assertTrue( areamissed/areafull <= 0.5) + except: + print("AREA MISSED",areamissed/areafull) + core.plt.matshow(channel) + core.plt.savefig("AREAMISSED_DETECTED.png") + core.plt.matshow(self.channel) + core.plt.savefig("AREAMISSED_EXPECTED.png") + raise + + # Lastly, make sure it's going to give us basically the + # same important properties! 
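+        # The synthetic channel is horizontal, so under the regionprops orientation
+        # convention of the skimage version this targets its major axis should give
+        # ~0 rad; the 10-degree delta allows for noise in the detected mask.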
+ prop = skmeas.regionprops(channel.astype(int))[0] + try: + self.assertTrue(np.abs(prop.orientation - self.orientation) + < (10*np.pi/180)) + except: + print("ANGLES", prop.orientation, self.orientation) + raise + +class TestGetChannelOrientationAndLine(unittest.TestCase): + def setUp(self): + self.channel_horiz = np.array([ + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,1,1,0,1,1,1,1,0,0,], + [0,1,1,1,1,1,1,1,1,0,], + [0,1,1,1,1,1,1,0,1,0,], + [0,0,0,0,0,0,0,0,0,0,], + ], dtype=int) + self.channel_vert = np.array([ + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,1,1,0,0,0,0,0,], + [0,0,0,1,1,1,0,0,0,0,], + [0,0,0,1,1,1,0,0,0,0,], + [0,0,0,0,1,1,0,0,0,0,], + [0,0,0,1,1,1,0,0,0,0,], + [0,0,0,1,1,1,0,0,0,0,], + [0,0,0,1,1,0,0,0,0,0,], + [0,0,0,1,1,1,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + ], dtype=int) + self.channel_diag = np.array([ + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,1,1,0,0,], + [0,0,0,0,0,1,1,1,0,0,], + [0,0,0,0,1,1,1,0,0,0,], + [0,0,0,1,1,1,0,0,0,0,], + [0,0,1,1,1,0,0,0,0,0,], + [0,1,1,1,0,0,0,0,0,0,], + [0,0,1,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + ], dtype=int) + + def test_get_channel_orientation_and_line_horizontal(self): + angle, line = mdet.get_channel_orientation_and_line(self.channel_horiz) + # Make sure angle is +-1 degree + self.assertAlmostEqual(angle, 0, delta=np.pi/180) + + # Order coordinates so we can compare properly + line = sorted(line, key=lambda pt: pt[0]) + + # Check coordinates + self.assertAlmostEqual(line[0][0], 0.5, delta=1) + self.assertAlmostEqual(line[0][1], 7, delta=1) + self.assertAlmostEqual(line[1][0], 8, delta=1) + self.assertAlmostEqual(line[1][1], 7, delta=1) + + def test_get_channel_orientation_and_line_vertical(self): + angle, line = mdet.get_channel_orientation_and_line(self.channel_vert) + # Make sure angle is +-1 degree + # Angle can be ~ + or - 90 degrees, so need to abs + self.assertAlmostEqual(np.abs(angle), np.pi/2, delta=np.pi/180) + + # Order coordinates so we can compare properly + line = sorted(line, key=lambda pt: pt[1]) + # Check coordinates + self.assertAlmostEqual(line[0][0], 4, delta=1) + self.assertAlmostEqual(line[0][1], 0.5, delta=1) + self.assertAlmostEqual(line[1][0], 4, delta=1) + self.assertAlmostEqual(line[1][1], 8.5, delta=1) + + def test_get_channel_orientation_and_line_diagonal(self): + angle, line = mdet.get_channel_orientation_and_line(self.channel_diag) + # Make sure angle is +-45 degree + # Angle can be ~ + or - 45 degrees, so need to abs + self.assertAlmostEqual(np.abs(angle), np.pi/4, delta=np.pi/180) + + # Order coordinates so we can compare properly + line = sorted(line, key=lambda pt: pt[0]) + + # Check coordinates + self.assertAlmostEqual(line[0][0], 1, delta=1) + self.assertAlmostEqual(line[0][1], 1, delta=1) + self.assertAlmostEqual(line[1][0], 8, delta=1) + self.assertAlmostEqual(line[1][1], 8, delta=1) + + + +class TestGetWellsAndUnitVectors(unittest.TestCase): + def setUp(self): + + wells_vertical = np.array([ + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,3,0,0,0,0,], + [0,1,0,2,0,3,0,4,0,0,], + [0,1,0,2,0,3,0,4,0,0,], + [0,1,0,2,0,3,0,4,0,0,], + [0,1,0,2,0,3,0,4,0,0,], + [0,1,0,2,0,3,0,4,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + ], dtype=int) + self.props_vertical = skmeas.regionprops(wells_vertical) + + wells_horizontal = np.array([ + [0,0,0,0,0,0,0,0,0,0,], + [0,1,1,1,1,1,1,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + 
[0,2,2,2,2,2,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,3,3,3,3,3,3,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,4,4,4,4,4,4,4,4,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + ], dtype=int) + self.props_horizontal = skmeas.regionprops(wells_horizontal) + + wells_vertical_with_outlier = np.array([ + [0,0,0,0,0,0,0,0,0,0,], + [0,5,5,5,5,5,5,5,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0,0,3,0,0,0,0,], + [0,1,0,2,0,3,0,4,0,0,], + [0,1,0,2,0,3,0,4,0,0,], + [0,1,0,2,0,3,0,4,0,0,], + [0,1,0,2,0,3,0,4,0,0,], + [0,1,0,2,0,3,0,4,0,0,], + [0,0,0,0,0,0,0,0,0,0,], + ], dtype=int) + self.props_vertical_with_outlier = skmeas.regionprops(wells_vertical_with_outlier) + + def test_get_wells_and_unit_vectors_vertical(self): + + coms, oris, uvec_para, uvec_perp = mdet._get_wells_and_unit_vectors( + self.props_vertical) + # Unit vectors can be +-, so make sure by abs-ing + uvec_perp = np.abs(uvec_perp) + uvec_para = np.abs(uvec_para) + + np.testing.assert_array_almost_equal(uvec_para, [0, 1]) + np.testing.assert_array_almost_equal(uvec_perp, [1, 0]) + + def test_get_wells_and_unit_vectors_horizontal(self): + + coms, oris, uvec_para, uvec_perp = mdet._get_wells_and_unit_vectors( + self.props_horizontal) + # Unit vectors can be +-, so make sure by abs-ing + uvec_perp = np.abs(uvec_perp) + uvec_para = np.abs(uvec_para) + + np.testing.assert_array_almost_equal(uvec_para, [1, 0]) + np.testing.assert_array_almost_equal(uvec_perp, [0, 1]) + + def test_get_wells_and_unit_vectors_vertical_with_outlier(self): + """ + Now added in a horizontal well - if the simple outlier + detection doesn't filter it, the vectors shoudl be off. + """ + coms, oris, uvec_para, uvec_perp = mdet._get_wells_and_unit_vectors( + self.props_vertical_with_outlier) + # Unit vectors can be +-, so make sure by abs-ing + uvec_perp = np.abs(uvec_perp) + uvec_para = np.abs(uvec_para) + + np.testing.assert_array_almost_equal(uvec_para, [0, 1]) + np.testing.assert_array_almost_equal(uvec_perp, [1, 0]) + # Check that one region got rejected + self.assertEqual(len(oris), len(self.props_vertical_with_outlier)-1) + + +class TestGetWellSpacingAndSeparations(unittest.TestCase): + def setUp(self): + # Need, coms (centres of mass), the perpendicular unit vector + # to project the coms along, and wellwidth for filtering + # nearby coms + # Simple case - all uniformly distributed + self.coms_horizontal = [ + [2, 5], + [4, 6], + [6, 4], + [8, 5.5], + [10, 5], + ] + self.uvec_perp_horizontal = [1,0] + self.coms_vertical = [ + [5, 2], + [6, 4], + [4, 6], + [5.5, 8], + [5, 10], + ] + self.uvec_perp_vertical = [0,1] + self.coms_horizontal_with_gaps = [ + [2, 5], + [4, 6], + [6, 4], + [8, 5.5], + [12, 5], + [14, 6.5], + [18, 4.5], + ] + self.wellwidth = 0.5 + + def test_get_well_spacing_and_separations_horizontal(self): + normseps, posperp_sorted = mdet._get_well_spacing_and_separations( + self.coms_horizontal, + self.uvec_perp_horizontal, + self.wellwidth, + ) + self.assertListEqual(normseps.tolist(), [1,1,1,1]) + self.assertListEqual(posperp_sorted.tolist(), [2,4,6,8,10]) + + def test_get_well_spacing_and_separations_vertical(self): + normseps, posperp_sorted = mdet._get_well_spacing_and_separations( + self.coms_vertical, + self.uvec_perp_vertical, + self.wellwidth, + ) + self.assertListEqual(normseps.tolist(), [1,1,1,1]) + self.assertListEqual(posperp_sorted.tolist(), [2,4,6,8,10]) + + def test_get_well_spacing_and_separations_horizontal_with_gaps(self): + normseps, posperp_sorted = mdet._get_well_spacing_and_separations( + self.coms_horizontal_with_gaps, + 
self.uvec_perp_horizontal, + self.wellwidth, + ) + self.assertListEqual(normseps.tolist(), [1,1,1,2,1,2]) + self.assertListEqual(posperp_sorted.tolist(), [2,4,6,8,12,14,18]) + + + +class TestInterpolatePositionsAndExtractProfiles(unittest.TestCase): + def setUp(self): + self.image = 10 + np.random.randn(10,22) + self.image += 20*np.array([ + [0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,], + [0,0,10,10,0,0,1,1,0,0,2,2,0,0,3,3,0,0,4,4,0,0,], + [0,0,10,10,0,0,1,1,0,0,2,2,0,0,3,3,0,0,4,4,0,0,], + [0,0,10,10,0,0,1,1,0,0,2,2,0,0,3,3,0,0,4,4,0,0,], + [0,0,10,10,0,0,1,1,0,0,2,2,0,0,3,3,0,0,4,4,0,0,], + [0,0,10,10,0,0,1,1,0,0,2,2,0,0,3,3,0,0,4,4,0,0,], + [0,0,10,10,0,0,1,1,0,0,2,2,0,0,3,3,0,0,4,4,0,0,], + [0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,], + ]) + # Means for the regions after extrapolation + self.image_means = {1:210, 2:30, 3:50, 4:70, 5:90} + labels = np.array([ + [0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,], + [0,0,1,1, 0,0,2,2,0,0,3,3,0,0,0,0,0,0,4,4,0,0,], + [0,0,1,1, 0,0,2,2,0,0,3,3,0,0,0,0,0,0,4,4,0,0,], + [0,0,1,1, 0,0,2,2,0,0,3,3,0,0,0,0,0,0,4,4,0,0,], + [0,0,1,1, 0,0,2,2,0,0,3,3,0,0,0,0,0,0,4,4,0,0,], + [0,0,1,1, 0,0,2,2,0,0,3,3,0,0,0,0,0,0,4,4,0,0,], + [0,0,1,1, 0,0,2,2,0,0,3,3,0,0,0,0,0,0,4,4,0,0,], + [0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,], + ], dtype=int) + self.finallabel = np.array([ + [0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,], + [0,0,1,1, 0,0,2,2,0,0,3,3,0,0,4,4,0,0,5,5,0,0,], + [0,0,1,1, 0,0,2,2,0,0,3,3,0,0,4,4,0,0,5,5,0,0,], + [0,0,1,1, 0,0,2,2,0,0,3,3,0,0,4,4,0,0,5,5,0,0,], + [0,0,1,1, 0,0,2,2,0,0,3,3,0,0,4,4,0,0,5,5,0,0,], + [0,0,1,1, 0,0,2,2,0,0,3,3,0,0,4,4,0,0,5,5,0,0,], + [0,0,1,1, 0,0,2,2,0,0,3,3,0,0,4,4,0,0,5,5,0,0,], + [0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,], + [0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,], + ], dtype=int) + + self.normseps = [1, 1, 2] + self.posperp_sorted = [2.5, 6.6, 10.5, 18.5] + self.propsgood = skmeas.regionprops(labels) + self.uvec_para = [0,1] + self.uvec_perp = [1,0] + self.wellwidth = 2 + def test_interpolate_positions_and_extract_profiles(self): + images, wellimage, coords = mdet.interpolate_positions_and_extract_profiles( + np.array(self.normseps), + np.array(self.posperp_sorted), + self.propsgood, + np.array(self.uvec_para), + np.array(self.uvec_perp), + self.wellwidth, + self.image, + ) + + # We can check the stats for the regions + for k, im in images.items(): + # Have normally distributed noise with std 1, + # averaged over 12 values... deviation should + # be well less than 1... but just failed, set to delta=2 + self.assertAlmostEqual( + im.mean(), + self.image_means[k], + delta=2, + ) + np.testing.assert_array_equal(wellimage, self.finallabel) + + #print("\n\n") + #print("Well images:") + #for k in images: + # print(k) + # print(images[k]) + #print("Coords:") + #for c in coords: + # print(c) + #import matplotlib.pyplot as plt + #plt.imshow(wellimage) + #plt.show() + + + + +if __name__ == '__main__': + unittest.main() diff --git a/momanalysis/tests/test_gui.py b/momanalysis/tests/test_gui.py new file mode 100644 index 0000000..ae7dc7d --- /dev/null +++ b/momanalysis/tests/test_gui.py @@ -0,0 +1,39 @@ +# +# FILE : test_gui.py +# CREATED : 22/02/17 11:38:44 +# AUTHOR : J. 
Metz +# DESCRIPTION : Added gui testing +# + +import sys +import unittest +from PyQt5.QtWidgets import QApplication +from PyQt5.QtTest import QTest +from PyQt5.QtCore import Qt + +import momanalysis.gui as mgui + +app = QApplication(sys.argv) + +class TestMomanalysisGui(unittest.TestCase): + def setUp(self): + """ + Create the GUI + """ + self.gui = mgui.MainWindow() + + def test_defaults(self): + """ + Check all the default settings + """ + # Start should be disabled + self.assertFalse(self.gui.mw.startbutton.isEnabled()) + # Default mode is brightfield + self.assertTrue(self.gui.mw.brightfield_box.isChecked()) + self.assertFalse(self.gui.mw.comb_fluorescence.isChecked()) + self.assertFalse(self.gui.mw.seper_fluorescence.isChecked()) + + + +if __name__ == "__main__": + unittest.main() diff --git a/momanalysis/tests/test_io.py b/momanalysis/tests/test_io.py new file mode 100644 index 0000000..e2262bf --- /dev/null +++ b/momanalysis/tests/test_io.py @@ -0,0 +1,81 @@ +# +# FILE : test_io.py +# CREATED : 11/11/16 13:04:18 +# AUTHOR : J. Metz +# DESCRIPTION : Unittests for IO functions +# + +import unittest +import numpy as np +import tempfile +import skimage.io as skio +import os +import momanalysis.io as mio +import traceback + +class TestLoadData(unittest.TestCase): + + def setUp(self): + # Create a temporary file with test input data + # NB using high numbers to avoid annoying low contrast image + # warning from skimage.io + self.data = np.array([ + [100,200,3], + [4,5,6], + [7,8,9], + [100,200,255], + ], dtype='uint8') + fid, self.filename = tempfile.mkstemp(".tif") + skio.imsave(self.filename, self.data) + + def test_load_data(self): + np.testing.assert_array_equal( + mio.load_data(self.filename), + self.data) + + def tearDown(self): + try: + os.remove(self.filename) + except: + print("WARNING: UNABLE TO REMOVE TEMPORARY TESTING FILE:") + print(self.filename) + print("DUE TO ERROR:") + traceback.print_exc() + print("MAKE SURE TO MANUALLY REMOVE THE FILE YOURSELF") + print("(OR LET YOUR SYSTEM DEAL WITH IT!)") + + +class TestSampleData(unittest.TestCase): + + def setUp(self): + self.default_shape = (200,220) + self.dtype_wells = np.dtype(np.float64) + self.dtype_labs = np.dtype(np.uint16) + + def test_load_sample_well_data(self): + # The data itself is random, so let's just make sure + # we get arrays that look about right + wells, labs = mio.load_sample_well_data() + self.assertEqual(wells.shape, self.default_shape) + self.assertEqual(labs.shape, self.default_shape) + self.assertIs(wells.dtype, self.dtype_wells) + self.assertIs(labs.dtype, self.dtype_labs) + + def tearDown(self): + pass + + +class TestFluoSplit(unittest.TestCase): + + def setUp(self): + self.default_shape = np.arange(24).reshape(2,3,4) + + self.brightfield_image = np.arange(12).reshape(1,3,4) + + self.fluo_image = np.arange(12,24).reshape(1,3,4) + + def test(self): + data, fluo_data = mio.split_fluorescence(self.default_shape) + np.testing.assert_array_equal(data, self.brightfield_image) + np.testing.assert_array_equal(fluo_data, self.fluo_image) + diff --git a/momanalysis/tests/test_measurements.py b/momanalysis/tests/test_measurements.py new file mode 100644 index 0000000..d777ad5 --- /dev/null +++ b/momanalysis/tests/test_measurements.py @@ -0,0 +1,147 @@ +# -*- coding: utf-8 -*- +""" +Created on Tue Sep 20 15:51:22 2016 + +@author: as624 +""" + +import momanalysis.measurements as mmeas +import momanalysis.output as moutp +import numpy as np +import unittest +import csv +import tempfile +import os + +class 
TestFindFilename(unittest.TestCase): + + def setUp(self): + self.inputname = 'testinputfilename.test' + + self.output = 'testinputfilename' + + def test_input_filename(self): + self.assertEqual( self.output, mmeas.find_input_filename(self.inputname, test=True)[1]) + +class TestCounts(unittest.TestCase): + + def setUp(self): + self.wells = np.array([ + [0,0,0,0,0], + [0,1,0,2,0], + [0,1,0,2,0], + [0,1,0,2,0], + ]) + self.bacteria = np.array([ + [0,0,0,0,0], + [0,1,0,0,0], + [0,0,0,3,0], + [0,2,0,0,0], + ]) + self.counts = [2,1] + + # Using just individual labels + self.bacteria_labels = {1: + np.array([[1,0,2,2,2,0,3],]),2: + np.array([[0,0,0],]),3: + np.array([[1,],]) + } + self.counts2 = [3, 0, 1] + + def test_counts_just_labels(self): + self.assertEqual( self.counts2, mmeas.count_bacteria_in_wells(self.bacteria_labels)) + + +class TestBacteriaMeasurements(unittest.TestCase): + + def setUp(self): + self.bacteria_labels = np.array([[1,1,1,1,0,0], + [1,1,1,1,0,0], + [2,2,2,2,2,0], + [2,2,2,2,2,0]]) + + self.areaarray = [['1',8],['2',10]] + + self.length1 = 4 + self.length2 = 5 + self.lbl_dict = {1:'1', 2:'2'} + + def test_bacteria_measurements(self): + measurements = mmeas.bacteria_measurements(self.lbl_dict,self.bacteria_labels) + areas = [row[0:2] for row in measurements] + length1 = measurements[0][2] + length2 = measurements[1][2] + self.assertEqual(self.areaarray, areas) + self.assertTrue((self.length1-1) <= length1 <= (self.length1+1)) + self.assertTrue((self.length2-1) <= length2 <= (self.length2+1)) + +class TestFluoMeasurements(unittest.TestCase): + + def setUp(self): + self.bacteria_labels = np.array([[0,0,0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,3,3,3], + [1,1,1,0,0,0,0,0,0,3,3,3], + [1,1,1,0,0,0,0,0,0,3,3,3], + [1,1,1,0,0,0,0,0,0,3,3,3], + [1,1,1,0,0,0,0,0,0,3,3,3], + [0,0,0,0,0,0,0,0,0,3,3,3], + [0,0,0,2,2,2,0,0,0,3,3,3], + [0,0,0,2,2,2,0,0,0,3,3,3], + [0,0,0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0,0,0]]) + + self.fluo_image = np.array([[0,0,0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,3000,3000,3000], + [2000,2000,2000,0,0,0,0,0,0,3000,3000,3000], + [2100,2100,2100,0,0,0,0,0,0,3800,3800,3800], + [2100,2100,2100,0,0,0,0,0,0,3000,3000,3000], + [2200,2200,2200,0,0,0,0,0,0,3000,3000,3000], + [0,0,0,0,0,0,0,0,0,3000,3000,3000], + [0,0,0,3200,3200,3200,0,0,0,3000,3000,3000], + [0,0,0,3300,3300,3300,0,0,0,3000,3000,3000], + [0,0,0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0,0,0]]) + + self.bkground = 100 + + self.bkg_sem = 1 + + self.fluo_values = {1:[2100.0,2000.0],2:[3250.0,3150.0],3:[3100.0,3000.0]} + + def test_fluorescentmeasurements(self): + fluo = mmeas.fluorescence_measurements(self.bacteria_labels, self.bkground, self.bkg_sem, self.fluo_image) + self.assertEqual(self.fluo_values, fluo) + +class TestFluorescenceBackground(unittest.TestCase): + + def setUp(self): + self.wells0 = np.array([[0,0,0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0,0,0], + [1,1,1,0,0,0,0,0,0,3,3,3], + [1,1,1,0,0,2,0,0,0,3,3,3], + [1,1,1,0,2,2,2,0,0,3,3,3], + [1,1,1,0,2,2,2,0,0,3,3,3], + [1,1,1,0,2,2,2,0,0,3,3,3]]) + + self.fluo_image = np.array([[0,0,0,0,0,0,0,0,0,0,0,0], + [0,0,0,0,0,0,0,0,0,0,0,0], + [100,100,100,0,0,0,0,0,0,300,300,300], + [100,100,100,0,0,200,0,0,0,300,300,300], + [100,100,100,0,200,200,200,0,0,300,300,300], + [100,100,100,0,200,200,200,0,0,300,300,300], + [100,100,100,0,200,200,200,0,0,300,300,300]]) + + self.background = 200 + + def 
test_background_fluorescence(self): + bkground, bkg_sem = mmeas.fluorescence_background(self.wells0, self.fluo_image) + self.assertEqual(bkground, self.background) + +if __name__ == '__main__': + unittest.main() + + diff --git a/momanalysis/tests/test_output.py b/momanalysis/tests/test_output.py new file mode 100644 index 0000000..cd864f3 --- /dev/null +++ b/momanalysis/tests/test_output.py @@ -0,0 +1,296 @@ +# +# AUTHOR : J. Metz +# DESCRIPTION : Test the output functions +# + +import unittest +import numpy as np +import tempfile +import skimage.io as skio +import os +import momanalysis.output as mout +import traceback +import csv + +class TestOutputCSV(unittest.TestCase): + def setUp(self): + # + # IMPORTANT NOTE + # + # This is run before EACH class method + # Similarly the tearDown is run after EACH class + # method - so they are not once-only initializations / cleanups + # + # This means for example, that we can re-use the output directory + # between the normal and fluoro example. + self.inputmeasurements = [[1,4,8,10],[2,6,9,13]] + + self.readCSV = [["Bacteria Number", "Area", "Length", "Width"], + ['1','4','8','10'], + ['2','6','9','13',]] + + # This is deprecated... (see tempfile docs) + #self.csv_test_filename = tempfile.mktemp() + #self.csv_test_filename_1 = tempfile.mktemp() + self.output_dir = tempfile.mkdtemp() + _, self.csv_test_filename = tempfile.mkstemp( + dir=self.output_dir) + # Using this method, we create some intermediate temporary files + # that we don't need...we're only using this to + # generate the base filename + # TODO: Probably better to use mkdtemp and then just create a + # random string instead of using tempfile for the filename base + os.remove(self.csv_test_filename) + + # Now we only need the final parts of the filename + # at the moment, though I propose to pass in full paths and remove + # passing in a directory name and a relative file name... 
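+        # A minimal sketch of the alternative the TODO above suggests
+        # (purely illustrative - not used by this test): generate the base
+        # name directly instead of creating and then deleting a temporary
+        # file, e.g.
+        #
+        #     import uuid
+        #     self.output_dir = tempfile.mkdtemp()
+        #     self.csv_test_filename = "momtest_" + uuid.uuid4().hex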
+ self.csv_test_filename = os.path.basename(self.csv_test_filename) + self.inputmeasurements_fluo = [[1,4,8,20,15,33,60],[2,6,9,35,30,27,180]] + + self.bkground = 5 + + self.readCSV_2 = [['Background', '5', '','','','',''], + ["Bacteria Number", "Area", "Length", "Width", "fluorescence", "fluo - background", "fluo sum - background"], + ['1','4','8','20','15','33','60'], + ['2','6','9','35','30','27','180']] + + + def test_input_filename(self): + # Run the csv output function + mout.output_csv( + self.inputmeasurements, + self.output_dir, + self.csv_test_filename, + 1) + # Now check we got what we expected + self.expected_csv_file = os.path.join( + self.output_dir, + '%s_timepoint_%d.csv' % (self.csv_test_filename,1) + ) + with open( self.expected_csv_file, 'rt') as csvfile: + output = csv.reader(csvfile, delimiter=',') + outputtest = [] + for row in output: + outputtest.append(row) + self.assertEqual(outputtest, self.readCSV) + + def test_input_filename_fluo(self): + mout.output_csv( + self.inputmeasurements_fluo, + self.output_dir, + self.csv_test_filename, + 2, + bg=self.bkground, + fl=True) + + self.expected_csv_file = os.path.join( + self.output_dir, + '%s_timepoint_%d.csv' % (self.csv_test_filename,2) + + ) + with open( self.expected_csv_file, 'rt') as csvfile: + output = csv.reader(csvfile, delimiter=',') + outputtest = [] + for row in output: + outputtest.append(row) + self.assertEqual(outputtest, self.readCSV_2) + + def tearDown(self): + # Remove the csv file + os.remove(self.expected_csv_file) + # Remove the temporary folder + os.rmdir(self.output_dir) + + +class TestOuputFigure(unittest.TestCase): + """ + Test generating output figures: + * Data image with + - contour of well + label text + - contour of bacteria + label text + * Data image with + - Semi-transparent overlay of channel + wells segmentation + * Ridges image + The output folder is an input, and the output pattern is + wells_%0.6d_%0.6d.jpg%(t, i)) + where t is the time index (pass as input) + and i is the figure number (auto-generated, should range from 0 to 2 inclusive) + """ + def setUp(self): + # Create data + # "Realistic" + #self.size = (400,600) + #self.data = 10*np.random.randn(self.size) + 100 + # Toy + self.data = np.array([ + [ 10, 12, 12, 11, 10], + [ 11, 5, 11, 4, 9], + [ 12, 4, 11, 5, 11], + [ 10, 6, 12, 4, 12], + [ 10, 12, 12, 11, 10], + ]) + self.wells = np.array([ + [ 0, 0, 0, 0, 0], + [ 0, 1, 0, 2, 0], + [ 0, 1, 0, 2, 0], + [ 0, 1, 0, 2, 0], + [ 0, 0, 0, 0, 0], + ]) + self.ridges = np.array([ + [ 0, 0, 0, 0, 0], + [ 0, 1, 0, 1, 0], + [ 0, 1, 0, 1, 0], + [ 0, 1, 0, 1, 0], + [ 0, 0, 0, 0, 0], + ]) + # Old style bacteria... 
+ #self.bacteria = np.array([ + # [ 0, 0, 0, 0, 0], + # [ 0, 1, 0, 3, 0], + # [ 0, 0, 0, 0, 0], + # [ 0, 2, 0, 0, 0], + # [ 0, 0, 0, 0, 0], + #]) + self.bacteria = { + 1:np.array([1]), + 2:np.array([2]), + 3:np.array([3]), + } + self.label_dict_string = { + 1:"BACT 1", + 2:"BACT 2", + 3:"BACT 3", + } + self.coords = [ + ([1,1]), + ([1,3]), + ([3,1]), + ] + self.channel = np.array([ + [ 0, 0, 0, 0, 0], + [ 0, 0, 0, 0, 0], + [ 0, 0, 0, 0, 0], + [ 0, 0, 0, 0, 0], + [ 1, 1, 1, 1, 1], + ]) + + # Output destination + self.folder = tempfile.mkdtemp() + self.pattern = "wells_%0.6d_%0.6d.jpg" # Take t then i + self.NUMFIGS = 3 + + # Dummy time index + self.tindex = 42 + + # Files that should be created + self.expected_figures = [ os.path.join( + self.folder, + self.pattern % (self.tindex, i) + ) for i in range(self.NUMFIGS) + ] + + # Create images myself + + def test_output_figures(self): + mout.output_figures( + self.data, + self.tindex, + self.channel, + self.wells, + None, #profiles, UNUSED + self.bacteria, + self.coords, + self.wells, + self.ridges, + self.folder, + self.label_dict_string, + ) + # Make sure the expected figures have been cerated + for f in self.expected_figures: + self.assertTrue(os.path.isfile(f)) + + def tearDown(self): + # Delete files + for f in self.expected_figures: + os.remove(f) + # Delete folder + os.rmdir(self.folder) + +class TestFinalOuput(unittest.TestCase): + + def setUp(self): + # Default value for non-fluoro data + self.fluo = None + self.fluoresc = False + + # Location of individual csv files + self.folder = tempfile.mkdtemp() + + # How many time points + self.tmax = 50 + # How many bacteria per time point? + self.nbac = 5 + # Number of measurements is 4 for non-fluoro data + self.nmeas = 4 # (See headers below) + + # Make some (tmax) csv files + self.csv_files = [] + header = ["Bacteria Number", "Area", "Length", "Width"] + _, self.filename = tempfile.mkstemp( + dir=self.folder, + #suffix='timepoint_%d.csv'%t + ) + # Remove the actual file - don't need it + os.remove(self.filename) + + self.expected_output_file = os.path.join(self.folder, + "Combined_Output.csv") + # Make sure it doesn't exist! 
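+        # (Combined_Output.csv is the file final_output() is expected to build
+        #  from the per-timepoint CSVs; test_final_output() below asserts both
+        #  that it exists and that it has the expected number of rows/columns.)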
+ if os.path.isfile(self.expected_output_file): + os.remove(self.expected_output_file) + + + for t in range(self.tmax): + fname = "%s_timepoint_%d.csv"%(self.filename, t) + with open(fname, "w") as fout: + wrt = csv.writer(fout) + wrt.writerow(header) + for nb in range(1, self.nbac+1): + wrt.writerow([ + nb, # Label + np.random.randint(100,200), # Area + np.random.randint(10,20), # Length + np.random.randint(5, 10), # Width + ]) + self.csv_files.append(fname) + + + + def test_final_output(self): + mout.final_output( + self.folder, + self.filename, + self.fluo, + self.fluoresc, + self.tmax, + ) + # Make sure the output file exists and looks right + self.assertTrue(os.path.isfile(self.expected_output_file)) + # Make sure it has the correct dimensions + csvdata = list(csv.reader(open(self.expected_output_file))) + # Should have a time index row, a header row, plus nbac measurement rows + self.assertEqual(len(csvdata), 2+self.nbac) + # Should have nmeas * tmax columns + for row in csvdata: + self.assertEqual(len(row), self.nmeas*self.tmax) + + + def tearDown(self): + for f in self.csv_files: + os.remove(f) + try: + os.remove(os.path.join(self.folder, "Combined_Output.csv")) + except: + pass + os.rmdir(self.folder) diff --git a/momanalysis/tests/test_tracking.py b/momanalysis/tests/test_tracking.py new file mode 100644 index 0000000..e5be6ea --- /dev/null +++ b/momanalysis/tests/test_tracking.py @@ -0,0 +1,154 @@ +# -*- coding: utf-8 -*- +""" +Created on Thu Sep 1 14:28:10 2016 + +@author: as624 +""" + +from momanalysis.tracking import frametracker_keypoints as frametrck +from momanalysis.tracking import welltracking as welltrck +import unittest +import numpy as np +from skimage import transform as tf +from momanalysis.io import load_sample_full_frame +import scipy.ndimage as ndi + +class TestFrameTracking(unittest.TestCase): + + def setUp(self): + self.generate_frames() + + def generate_frames(self, tform=(12,15), seed=1, snr=10): + affinetform = tf.AffineTransform(translation=tform) + self.lbl1 = load_sample_full_frame( + seed = seed, + signal_to_noise=snr)[0] + self.lbl2 = tf.warp(self.lbl1, affinetform) + self.transf = np.array(tform) + + + def test_frametracking(self): + tfrm = frametrck(self.lbl1,self.lbl2, nk = 50, fn = 9, ft = 0.001, hk = 0.01, min_samples=10) + xtrans = abs(tfrm[0]-self.transf[1]) + ytrans = abs(tfrm[1]-self.transf[0]) + self.assertLessEqual(xtrans, 1.5) + self.assertLessEqual(ytrans, 1.5) + + +class TestWellTracking(unittest.TestCase): + + def setUp(self): + self.lbl1 = np.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [ 0, 1, 1, 0, 2, 2, 0, 3, 3, 0, 4, 4, 0], + [ 0, 1, 1, 0, 2, 2, 0, 3, 3, 0, 4, 4, 0], + [ 0, 1, 1, 0, 2, 2, 0, 3, 3, 0, 4, 4, 0], + [ 0, 1, 1, 0, 2, 2, 0, 3, 3, 0, 4, 4, 0], + ]) + self.lbl2 = np.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [ 0, 1, 1, 0, 2, 2, 0, 3, 3, 0, 4, 4, 0], + [ 0, 1, 1, 0, 2, 2, 0, 3, 3, 0, 4, 4, 0], + [ 0, 1, 1, 0, 2, 2, 0, 3, 3, 0, 4, 4, 0], + [ 0, 1, 1, 0, 2, 2, 0, 3, 3, 0, 4, 4, 0], + ]) + self.lbl2tracked = np.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [ 0, 2, 2, 0, 3, 3, 0, 4, 4, 0, 0, 0, 0], + [ 0, 2, 2, 0, 3, 3, 0, 4, 4, 0, 0, 0, 0], + [ 0, 2, 2, 0, 3, 3, 0, 4, 4, 0, 0, 0, 0], + [ 0, 2, 2, 0, 3, 3, 0, 4, 4, 0, 0, 0, 0], + ]) + + self.transform = [0, 3] + + self.shift = 1 + + + def test_welltracking(self): + newframe2 = welltrck(self.lbl1, self.lbl2, self.transform, pixls=1) 
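+        # welltracking() should relabel the wells of the second frame so that
+        # each one inherits the label of the first-frame well it corresponds
+        # to once the [0, 3] frame shift is accounted for; the right-most well
+        # has no first-frame counterpart and is left unlabelled (0), which is
+        # exactly what self.lbl2tracked encodes.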
+ np.testing.assert_array_equal(self.lbl2tracked, newframe2) + + +class TestCombinedTracking(unittest.TestCase): + def setUp(self): + self.generate_frames() + + def generate_frames(self, tform=(12,15), seed=1, snr=10): + affinetform = tf.AffineTransform(translation=tform) + self.frame1, self.label1 = load_sample_full_frame( + seed = seed, + signal_to_noise=snr)[:2] + self.frame2 = tf.warp(self.frame1, affinetform) + self.label2 = tf.warp(self.label1.astype(float), affinetform) + self.transf = np.array(tform) + + + def test_full_tracking(self): + + label2in = ndi.label(self.label2>0)[0] + + measured_tfrm = frametrck(self.frame1,self.frame2, nk = 50, fn = 9, ft = 0.001, hk = 0.01, min_samples=10) + newlbl2 = welltrck(self.label1, label2in, measured_tfrm, pixls=10) + + np.testing.assert_array_equal(self.label2, newlbl2) + + return + print("DETECTED TRANSFORM:", measured_tfrm) + print("ACTUAL", self.transf) + + import matplotlib.pyplot as plt + plt.figure() + plt.imshow(self.frame1, cmap='gray') + plt.imshow(self.label1, cmap='jet', alpha=0.3) + for i in range(1, int(self.label1.max()+1)): + bw = self.label1 == i + if not np.any(bw): + continue + pt = np.mean(bw.nonzero(), axis=1) + plt.text(pt[1], pt[0], i, color="w") + plt.title("Input label 1") + + plt.figure() + plt.imshow(self.frame2, cmap='gray') + plt.imshow(label2in, cmap='jet', alpha=0.3) + for i in range(1, int(label2in.max()+1)): + bw = label2in == i + if not np.any(bw): + continue + pt = np.mean(bw.nonzero(), axis=1) + plt.text(pt[1], pt[0], i, color="w") + plt.title("Label2 INPUT (RANDOM)") + + plt.figure() + plt.imshow(self.frame2, cmap='gray') + plt.imshow(self.label2, cmap='jet', alpha=0.3) + for i in range(1, int(self.label2.max()+1)): + bw = self.label2 == i + if not np.any(bw): + continue + + pt = np.mean(bw.nonzero(), axis=1) + plt.text(pt[1], pt[0], i, color="w") + plt.title("Theory label 2") + + plt.figure() + plt.imshow(self.frame2, cmap='gray') + plt.imshow(newlbl2, cmap='jet', alpha=0.3) + for i in range(1, int(newlbl2.max()+1)): + bw = newlbl2 == i + if not np.any(bw): + continue + pt = np.mean(bw.nonzero(), axis=1) + plt.text(pt[1], pt[0], i, color="w") + plt.title("Measured label 2") + + plt.figure() + plt.title("DIFFERENCE") + plt.imshow(self.label2 - newlbl2) + + plt.show() + +if __name__ == '__main__': + unittest.main() + diff --git a/momanalysis/tracking.py b/momanalysis/tracking.py new file mode 100644 index 0000000..dd63a8a --- /dev/null +++ b/momanalysis/tracking.py @@ -0,0 +1,272 @@ +# -*- coding: utf-8 -*- +""" +Created on Wed Sep 28 14:38:00 2016 + +@author: as624 +""" + +from skimage.feature import ( + match_descriptors, + plot_matches, + ORB, + match_template, + ) +import skimage.util as skutil +from skimage.measure import ransac +import numpy as np +from skimage.transform import AffineTransform +from skimage.measure import regionprops +from momanalysis.utility import logger +import matplotlib.pyplot as plt +import tempfile +import os +import momanalysis.bacteria_tracking as bactrack +import itertools + + +def frametracker(*args, **kwargs): + """ + Wrap the specific frametracker we're going to use + """ + return frametracker_template(*args, **kwargs) + +def frametracker_template( + img1, + img2, + border = 100, + debug=False, + ): + """ + Determine overall frame shift using template + matching (normalized cross-correlation) + """ + template = img1[border:-border, border:-border] + xcorr = match_template( + img2, # where to look + template, # the template to look for + ) + y, x = 
np.unravel_index(np.argmax(xcorr), xcorr.shape) + dy = y - border + dx = x - border + + if debug: + plt.figure() + plt.imshow(img2, cmap='gray') + rect = plt.Rectangle((x,y), template.shape[1], template.shape[0], + edgecolor='r', facecolor='none') + plt.gca().add_patch(rect) + plt.savefig("DEBUG_FRAMETRACKING_LOCATION.jpg") + plt.close() + plt.figure() + plt.imshow(template, cmap='gray') + plt.savefig("DEBUG_FRAMETRACKING_TEMPLATE.jpg") + plt.close() + plt.figure() + imcol = np.zeros(img1.shape + (3,)) + imcol[y:y+template.shape[0],x:x+template.shape[1], 0] = template + imcol[...,1] = img2 + imcol -= imcol.min() + imcol /= imcol.max() + plt.imshow(imcol) + plt.savefig("DEBUG_FRAMETRACKING_OVERLAY.jpg") + plt.close() + + return np.array([-dy,-dx]) + +def frametracker_keypoints( + img1, + img2, + nk = 50, + fn = 9, + ft = 0.001, + hk = 0.1, + min_samples=10, + xchange = 300, + ychange = 30, + debug=False, + ): + """ + Determine overall frame shift using ORB detection + and keypoint matching + """ + img1 = skutil.img_as_float(img1) + img2 = skutil.img_as_float(img2) + + + descriptor_extractor = ORB( + n_keypoints= nk, + fast_n= fn, + fast_threshold= ft, + harris_k= hk, + ) + + #determine keypoints and extract coordinates for first image + descriptor_extractor.detect_and_extract(img1) + keypoints1 = descriptor_extractor.keypoints + descriptors1 = descriptor_extractor.descriptors + + #determine keypoints and extract coordinates for second image + descriptor_extractor.detect_and_extract(img2) + keypoints2 = descriptor_extractor.keypoints + descriptors2 = descriptor_extractor.descriptors + + #determine matching coordinates + matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True) + + #create empty lists + src = [] + dst = [] + + for matches in matches12: + #find index of the match from original image and image being compared + a = matches[0] + b = matches[1] + #use the index from above to find the original coordinates from the images + a1 = keypoints1[a] + b1 = keypoints2[b] + #Create a list of the matched coordinates + a1x = a1[1] + a1y = a1[0] + b1x = b1[1] + b1y = b1[0] + xc = abs(a1x - b1x) + yc = abs(a1y - b1y) + #Create a list of the matched coordinates + if (xc < xchange) & (yc < ychange): + src.append(a1) + dst.append(b1) + + src = np.array(src) + dst = np.array(dst) + + if debug: + plt.figure() + plt.imshow(img1, cmap='gray') + plt.plot(keypoints1[:,1], keypoints1[:,0], '.r') + plt.savefig("DEBUG_WELLTRACKING_frame_a_plus_keypoints.jpg") + plt.close() + plt.figure() + plt.imshow(img2, cmap='gray') + plt.plot(keypoints2[:,1], keypoints2[:,0], '.r') + plt.savefig("DEBUG_WELLTRACKING_frame_b_plus_keypoints.jpg") + plt.close() + plt.figure() + ax = plt.gca() + plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12) + plt.savefig("DEBUG_WELLTRACKING_matches.jpg") + plt.close() + + try: + # estimate affine transform model using all coordinates + model = AffineTransform() + model.estimate(src, dst) + + # robustly estimate affine transform model with RANSAC + model_robust, inliers = ransac((dst, src), AffineTransform, min_samples=min_samples, + residual_threshold=2, max_trials=100) + trans = model_robust.translation + return trans + except: + debugfolder = tempfile.mkdtemp() + plt.figure() + plt.imshow(img1, cmap='gray') + plt.plot(keypoints1[:,1], keypoints1[:,0], '.r') + plt.savefig(os.path.join(debugfolder, "img1_plus_keypoints.jpg")) + plt.close() + plt.figure() + plt.imshow(img2, cmap='gray') + plt.plot(keypoints2[:,1], keypoints2[:,0], '.r') + 
        plt.savefig(os.path.join(debugfolder, "img2_plus_keypoints.jpg"))
+        plt.close()
+        plt.figure()
+        ax = plt.gca()
+        plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12)
+        plt.savefig(os.path.join(debugfolder, "matches.jpg"))
+        plt.close()
+        logger.critical("Failed to estimate affine transform!")
+        logger.critical("Debugging images saved to ", debugfolder)
+        # re-raise so the caller sees the failure rather than an implicit None
+        raise
+
+def welltracking(wellim1, wellim2, trans, pixls = 11):
+
+    newwellim2 = np.zeros(wellim2.shape, dtype=wellim2.dtype)
+    newwellim1 = np.zeros(wellim1.shape, dtype=wellim2.dtype)
+    for i,r in enumerate(regionprops(wellim1), start=1):
+        #find coordinates of the regions and select first coordinates
+        co = r.coords
+        p = co[0]
+        #subtract the frame shift
+        init = p - trans
+        #determine the "new" coordinates
+        xp1 = init[1]
+        yp1 = init[0]
+        for j, r2 in enumerate(regionprops(wellim2), start=1):
+            #find coordinates of the 2nd image regions and select first coordinates
+            co2 = r2.coords
+            p2 = co2[0]
+            xp2 = p2[1]
+            yp2 = p2[0]
+            #subtract "new" coordinates away from 2nd image coordinates
+            revshiftx = abs(xp2-xp1)
+            revshifty = abs(yp2-yp1)
+            #if they are within one well width (default 11 pixels) then assign them to the same label
+            if (revshiftx < pixls) & (revshifty < pixls):
+                newwellim2[wellim2 == j] = r.label
+                # work out the change in wells for bacteria tracking
+                #newwellim1[wellim1 == i] = r.label
+    #return new matching labelled well images
+    """
+    plt.figure()
+    plt.imshow(wellim1)
+    plt.figure()
+    plt.imshow(wellim2)
+    plt.figure()
+    plt.imshow(newwellim2)
+    plt.show()
+    """
+    return newwellim2
+
+def bacteria_tracking(last_wells, current_wells, label_dict_string):
+
+    """Track bacteria from the previous frame's wells into the matching current wells"""
+
+    options_dict = {}
+    well_dict = {}
+    bac_info = {}
+    out_wells = {}
+    for n, well in last_wells.items():
+        for n2, new_well in current_wells.items():
+            if n == n2: #if well numbers match then we can try and track the bacteria
+                in_list = []
+                option_list = []
+                for i, region in enumerate(regionprops(well)):
+                    in_list.append(region.label) #list the bacteria labels from the previous frame
+                #create a list of all the possible combinations
+                output_options = list(itertools.product(in_list, repeat=(len(regionprops(new_well)))))
+                for option in output_options:
+                    #the bacteria have to stay in the same order so we can filter out those that aren't in order
+                    if sorted(option) == list(option):
+                        if len(option) > 0: #if no options it is an empty list - filter this out
+                            option_list.append(option)
+                if not in_list:
+                    if len(regionprops(new_well)) > 0:
+                        #if there is now a new bacterium we don't want to ignore it
+                        #as it may just have been missed in the previous frame
+                        smax = max(label_dict_string, key=int)
+                        newwell = np.zeros(new_well.shape, dtype=new_well.dtype)
+                        for k, new_bac in enumerate(regionprops(new_well)):
+                            #so let's give each "new" bacterium a new label
+                            smax+=1
+                            newwell[new_well==new_bac.label] = smax
+                            label_dict_string[smax] = str(smax)
+                    else:
+                        #both the previous and current wells are empty -
+                        #nothing to track, so keep an empty well
+                        newwell = np.zeros(new_well.shape, dtype=new_well.dtype)
+                elif not option_list:
+                    #the new well is empty so there is nothing to track - simply return an empty well
+                    newwell = np.zeros(new_well.shape, dtype=new_well.dtype)
+                else: #determine probabilities and label matching/new bacteria
+                    options_dict[n] = [in_list,option_list]
+                    change_options = bactrack.find_changes(in_list,option_list, well, new_well)
+                    probs_ = bactrack.find_probs(change_options)
+                    newwell, label_dict_string = bactrack.label_most_likely(probs_, new_well, label_dict_string)
+                out_wells[n] = newwell
+
+    return out_wells, label_dict_string
+
diff --git a/momanalysis/utility.py b/momanalysis/utility.py
new file mode 100644
index 0000000..f1f2a0e
--- /dev/null
+++ b/momanalysis/utility.py
@@ -0,0 +1,153 @@
+#
+# FILE : utility.py
+# CREATED : 30/09/16 13:07:39
+# AUTHOR : J. Metz
+# DESCRIPTION : Utility functions, such as logging
+#
+
+import logging
+import os, sys
+
+BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(8))
+# The background is set with 40 plus the number of the color, and the foreground with 30
+# These are the ANSI escape sequences needed to get colored output
+RESET_SEQ = "\033[0m"
+COLOR_SEQ = "\033[1;%dm"
+BOLD_SEQ = "\033[1m"
+def formatter_message(message, use_color = True):
+    if use_color:
+        message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
+        message = message.replace("$RED", COLOR_SEQ % (30 + RED))
+    else:
+        message = message.replace("$RESET", "").replace("$BOLD", "")
+        message = message.replace("$RED", "")
+    return message
+
+COLORS = {
+    'WARNING': YELLOW,
+    'INFO': WHITE,
+    'DEBUG': BLUE,
+    'CRITICAL': YELLOW,
+    'ERROR': RED
+}
+
+class ColoredFormatter(logging.Formatter):
+    def __init__(self, msg, use_color = True):
+        logging.Formatter.__init__(self, msg)
+        self.use_color = use_color
+
+    def format(self, record):
+        levelname = record.levelname
+        if self.use_color and levelname in COLORS:
+            levelname_color = COLOR_SEQ % (30 + COLORS[levelname]) + levelname + RESET_SEQ
+            record.levelname = levelname_color
+        if self.use_color:
+            record.msg = COLOR_SEQ % (30 + GREEN) + record.msg + RESET_SEQ
+
+        return logging.Formatter.format(self, record)
+
+
+class MonoFormatter(logging.Formatter):
+    #def __init__(self, msg):
+    #    logging.Formatter.__init__(self, msg)
+    #
+    #def format(self, record):
+    #    return logging.Formatter.format(self, record)
+    pass
+
+class ColoredLogger(logging.Logger):
+    FORMAT = "".join([
+        "[$BOLD%(name)-8s$RESET][%(levelname)-18s] :",
+        " ($BOLD%(filename)s$RESET:$RED%(lineno)d$RESET)",
+        " %(message)s",
+    ])
+    COLOR_FORMAT = formatter_message(FORMAT, True)
+    def __init__(self, name):
+        logging.Logger.__init__(self, name, logging.DEBUG)
+        color_formatter = ColoredFormatter(self.COLOR_FORMAT)
+        console = logging.StreamHandler()
+        console.setFormatter(color_formatter)
+        self.addHandler(console)
+        return
+
+
+    def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
+                   func=None, extra=None, sinfo=None):
+        """
+        A factory method which can be overridden in subclasses to create
+        specialized LogRecords.
+ """ + if args: + msg += " " + " ".join(map(str, args)) + args = [] + rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func, sinfo) + if extra is not None: + for key in extra: + if (key in ["message", "asctime"]) or (key in rv.__dict__): + raise KeyError("Attempt to overwrite %r in LogRecord" % key) + rv.__dict__[key] = extra[key] + return rv + +class MonoLogger(logging.Logger): + FORMAT = "".join([ + "[%(name)-8s][%(levelname)-18s] :", + " (%(filename)s%(lineno)d)", + " %(message)s", + ]) + # "[$BOLD%(name)-8s$RESET][%(levelname)-18s] :", + # " ($BOLD%(filename)s$RESET:$RED%(lineno)d$RESET)", + # " %(message)s", + MONOFORMAT = formatter_message(FORMAT, True) + def __init__(self, name): + logging.Logger.__init__(self, name, logging.DEBUG) + self.formatter = MonoFormatter(self.MONOFORMAT) + console = logging.StreamHandler() + console.setFormatter(self.formatter) + self.addHandler(console) + return + + + def makeRecord(self, name, level, fn, lno, msg, args, exc_info, + func=None, extra=None, sinfo=None): + """ + A factory method which can be overridden in subclasses to create + specialized LogRecords. + """ + if args: + msg += " " + " ".join(map(str, args)) + args = [] + rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func, sinfo) + if extra is not None: + for key in extra: + if (key in ["message", "asctime"]) or (key in rv.__dict__): + raise KeyError("Attempt to overwrite %r in LogRecord" % key) + rv.__dict__[key] = extra[key] + return rv + + +def supports_color(): + """ + Returns True if the running system's terminal supports color, and False + otherwise. + + Found on stackoverflow, apparently from Django project: + https://github.com/django/django/blob/master/django/core/management/color.py + """ + plat = sys.platform + supported_platform = plat != 'Pocket PC' and (plat != 'win32' or + 'ANSICON' in os.environ) + # isatty is not always implemented, #6223. + is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty() + if not supported_platform or not is_a_tty: + return False + return True + +if supports_color(): + #print("DEBUG: using color logger", flush=True) + logger = ColoredLogger("momanalysis") +else: + #print("DEBUG: using monochrome logger", flush=True) + logger = MonoLogger("momanalysis") + #logging.basicConfig() + #logger = logging.getLogger("momanalysis") +logger.setLevel(logging.WARNING) diff --git a/run.bat b/run.bat new file mode 100644 index 0000000..424131c --- /dev/null +++ b/run.bat @@ -0,0 +1,21 @@ +@echo off +rem +rem Simple run script for Windows Systems +rem +rem This script assumes that Python has been correctly setup for your shell. +rem + + +rem WHERE python +python -V >nul 2>&1 + +rem if %ERRORLEVEL% NEQ 0( +rem ECHO The Python command, python, is not recognized by your terminal. +rem ECHO Please setup Python properly then run this command again. +rem &ECHO. +rem ECHO You may now close this terminal. +rem pause +rem exit /b %errorlevel% +rem ) + +python -m momanalysis.gui diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..2f88389 --- /dev/null +++ b/setup.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python +from setuptools import setup, Command +# Make sure correct qt backend is used +import matplotlib +matplotlib.use('qt5agg') + + +setup(name='Momanalysis', + version='0.73', + description='Mother Machine Analysis module', + author='Jeremy Metz', + author_email='j.metz@exeter.ac.uk', + packages=['momanalysis'], +)
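
The test modules added above are plain unittest test cases, so once the package is importable they can be run with the built-in discovery runner. The exact invocation is only a suggested example and depends on how the checkout is laid out; it is not part of the patch itself:

> python -m unittest discover momanalysis/tests

pytest will also collect these tests unchanged; note that test_gui.py needs an environment in which PyQt5 can create a QApplication (e.g. a display or an offscreen Qt platform).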