diff --git a/README.md b/README.md
index d2f3e2e2a82cc9de7e4cf656cd6d7492264f810d..c97ba1c0f9ed0bb961ba9cc9115128859290535d 100644
--- a/README.md
+++ b/README.md
@@ -130,7 +130,8 @@ seg_expt = SegmentedExperiment(expt)
 
 The initialization should take a few seconds, as it needs to align the
 images in time.
-
+
+#### Get a timelapse for a given trap
 From there, you can obtain a timelapse for a single trap as follows:
 ```python
 channels = [0] #Get only the first channel, this is also the default
@@ -149,10 +150,23 @@ timelapse = seg_expt.get_trap_timelapse(trap_id, tile_size=tile_size,
 
 This can take several seconds at the moment. For a speed-up: take fewer
 z-positions if you can.
 
-If you're not sure what your channel's index is, you can get the ordered
- list of channels in the experiment with:
+If you're not sure what indices to use:
+```python
+seg_expt.channels # Get a list of channels
+channel = 'Brightfield'
+ch_id = seg_expt.get_channel_index(channel)
+
+n_traps = seg_expt.n_traps # Get the number of traps
+```
+
+#### Get the traps for a given time point
+Alternatively, if you want to get all the traps at a given timepoint:
+
 ```python
-expt.channels
+timepoint = 0
+seg_expt.get_traps_timepoint(timepoint, tile_size=96, channels=None,
+                             z=[0,1,2,3,4])
 ```
 
+
diff --git a/core/experiment.py b/core/experiment.py
index f7acdd5a8a9546d0b85a9e7b7c9fbb25568ac08b..b340160d3b9017aad6eb9f49e218352cb73f7fec 100644
--- a/core/experiment.py
+++ b/core/experiment.py
@@ -116,7 +116,8 @@ class ExperimentOMERO(Experiment):
         self.name = self.dataset.getName()
 
         self._positions = {img.getName(): img.getId() for img in
-                           self.dataset.listChildren()}
+                           sorted(self.dataset.listChildren(),
+                                  key=lambda x: x.getName())}
 
         # Set up the current position as the first in the list
         self._current_position = self.get_position(self.positions[0])
@@ -248,9 +249,9 @@ class ExperimentLocal(Experiment):
             described above.
         :return:
         """
-        positions = [f.name for f in root_dir.iterdir()
+        positions = sorted([f.name for f in root_dir.iterdir()
                      if (re.search(r'pos[0-9]+$', f.name) and
-                         f.is_dir())]
+                         f.is_dir())])
         acq_file = glob.glob(os.path.join(str(root_dir), '*[Aa]cq.txt'))
         log_file = glob.glob(os.path.join(str(root_dir), '*[Ll]og.txt'))
         cache_file = glob.glob(os.path.join(str(root_dir), 'cache.config'))
diff --git a/core/segment.py b/core/segment.py
index 9ee8952f484491088af8176379a592dfe6757f07..726a2e31fe7b8701c65a66cf5a1f7979a8da9175 100644
--- a/core/segment.py
+++ b/core/segment.py
@@ -3,7 +3,6 @@ Includes splitting the image into traps/parts, cell segmentation, nucleus
 segmentation."""
 from skimage import feature
 import numpy as np
-from scipy.spatial import distance
 from pathlib import Path
 
 from core.traps import identify_trap_locations
@@ -12,6 +11,16 @@ trap_template_directory = Path(__file__).parent / 'trap_templates'
 trap_template = np.load(trap_template_directory / 'trap_bg_1.npy')
 
 
+def get_tile_shapes(x, tile_size, max_shape):
+    half_size = tile_size // 2
+    xmin = max(0, int(x[0] - half_size))
+    ymin = max(0, int(x[1] - half_size))
+    # Shift the tile back inside the image if it overhangs the far edge
+    if xmin + tile_size > max_shape[0]:
+        xmin = max_shape[0] - tile_size
+    if ymin + tile_size > max_shape[1]:
+        ymin = max_shape[1] - tile_size
+    return xmin, xmin + tile_size, ymin, ymin + tile_size
+
 def align_timelapse_images(raw_data, channel=0, reference_reset_time=80,
                            reference_reset_drift=25):
     """
@@ -31,15 +40,21 @@ def align_timelapse_images(raw_data, channel=0, reference_reset_time=80,
     register before resetting the reference image.
     :param channel: index of the channel to use for image registration.
     """
-    ref = np.squeeze(raw_data[channel, 0, :, :, 0])
+
+    def centre(img, percentage=0.3):
+        y, x = img.shape
+        cropx = int(np.ceil(x * percentage))
+        cropy = int(np.ceil(y * percentage))
+        startx = int(x // 2 - (cropx // 2))
+        starty = int(y // 2 - (cropy // 2))
+        return img[starty:starty + cropy, startx:startx + cropx]
+
+    ref = centre(np.squeeze(raw_data[channel, 0, :, :, 0]))
     size_t = raw_data.shape[1]
 
-    # TODO take only a central region of the image
     drift = [np.array([0, 0])]
-    references = [0]
-    processed = 0
     for i in range(1, size_t):
-        img = np.squeeze(raw_data[channel, i, :, :, 0])
+        img = centre(np.squeeze(raw_data[channel, i, :, :, 0]))
         shifts, _, _ = feature.register_translation(ref, img)
 
         # If a huge move is detected at a single time point it is taken
@@ -51,17 +66,13 @@ def align_timelapse_images(raw_data, channel=0, reference_reset_time=80,
             shifts = drift[-1]
 
         drift.append(shifts)
-        processed += 1
+        ref = img
 
-        # If the images have drifted too far from the reference or too
-        # much time has passed we change the reference and keep track of
-        # which images are kept as references
-        if (any([x > reference_reset_drift for x in drift[-1]])
-                or processed > reference_reset_time):
-            ref = img
-            processed = 0
-            references.append(i)
-    return np.stack(drift), references
+    # TODO test necessity for references, description below
+    # If the images have drifted too far from the reference or too
+    # much time has passed we change the reference and keep track of
+    # which images are kept as references
+    return np.stack(drift)
 
 
 class SegmentedExperiment(object):
@@ -73,57 +84,95 @@ class SegmentedExperiment(object):
         """
         self.raw_expt = raw_expt
         self.rotation = None
-        self.trap_locations = self.tile_timelapse()
+        self.trap_locations = dict()
        self.cell_outlines = None
         self.compartment_outlines = None
+        # Tile the current position
+        self.trap_locations[self.current_position] = self.tile_timelapse()
+
+    @property
+    def n_traps(self):
+        # Number of traps found at the first timepoint of the current position
+        return len(self.trap_locations[self.current_position][0])
+
+    @property
+    def positions(self):
+        return self.raw_expt.positions
+
+    @property
+    def current_position(self):
+        return str(self.raw_expt.current_position)
+
+    @current_position.setter
+    def current_position(self, position):
+        self.raw_expt.current_position = position
+        # Tile that position
+        if self.current_position not in self.trap_locations.keys():
+            self.trap_locations[self.current_position] = self.tile_timelapse()
+
+    @property
+    def channels(self):
+        return self.raw_expt.channels
+
+    def get_channel_index(self, channel):
+        return self.raw_expt.current_position.get_channel_index(channel)
+
     def tile_timelapse(self, channel=0):
         # Get the drift and references
-        drifts, refs = align_timelapse_images(self.raw_expt, channel=channel)
+        drifts = align_timelapse_images(self.raw_expt, channel=channel)
         # Find traps in the references
-        trap_locations = {ref: identify_trap_locations(
-            np.squeeze(self.raw_expt[channel, ref, :, :, 0]),
-            trap_template) for ref in refs}
-        # Compare reference direct traps-location to trap-location from drift
-        # Add drift to obtain the trap locations for each image
-        # TODO make single for loop using conditional clause for references
-        for j in range(len(refs) - 1):
-            for i in range(refs[j], refs[j + 1]):
-                trap_locations[i] = (
-                        trap_locations[refs[j]] - drifts[i, [1, 0]])
-
-            indices = np.argmin(distance.cdist(trap_locations[refs[j + 1]],
-                                               trap_locations[refs[j]] -
-                                               drifts[refs[j + 1], [1, 0]]),
-                                axis=0)
-
-            trap_locations[refs[j + 1]] = trap_locations[refs[j + 1]][indices]
-        # Add the timepoints after the final reference
-        for i in range(refs[-1], len(drifts)):
-            trap_locations[i] = (
-                    trap_locations[refs[-1]] - drifts[i, [1, 0]])
+        trap_locations = {0: identify_trap_locations(
+            np.squeeze(self.raw_expt[channel, 0, :, :, 0]), trap_template)}
+        for i in range(len(drifts)):
+            # Cumulative drift up to and including timepoint i
+            trap_locations[i] = trap_locations[0] \
+                                - np.sum(drifts[:i + 1, [1, 0]], axis=0)
+
         return trap_locations
 
-    def get_trap_timelapse(self, trap_id, tile_size=96, channels=[0],
-                           z=[0]):
+    def get_trap_timelapse(self, trap_id, tile_size=96, channels=None, z=None):
+        """
+        Get a timelapse for a given trap by specifying the trap_id
+        :param trap_id: An integer defining which trap to choose. Counted
+        between 0 and SegmentedExperiment.n_traps - 1
+        :param tile_size: The size of the trap tile (centered around the
+        trap as much as possible, edge cases exist)
+        :param channels: Which channels to fetch, indexed from 0.
+        If None, defaults to [0]
+        :param z: Which z_stacks to fetch, indexed from 0.
+        If None, defaults to [0].
+        :return: A numpy array with the timelapse in (C,T,X,Y,Z) order
+        """
+        # Set the defaults (list is mutable)
+        channels = channels if channels is not None else [0]
+        z = z if z is not None else [0]
         # Get trap location for that id:
-        trap_centers = [self.trap_locations[i][trap_id] for i in
-                        range(len(self.trap_locations))]
-
-        half_size = tile_size // 2
-
-        def get_tile_shapes(x, max_shape=(self.raw_expt.shape[2],
-                                          self.raw_expt.shape[3])):
-            xmin = int(x[0] - half_size)
-            ymin = max(0, int(x[1] - half_size))
-            if xmin + tile_size > max_shape[0]:
-                xmin = max_shape[0] - tile_size
-            if ymin + tile_size > max_shape[1]:
-                ymin = max_shape[1] - tile_size
-            return xmin, xmin + tile_size, ymin, ymin + tile_size
+        trap_centers = [self.trap_locations[self.current_position][i][trap_id]
+                        for i in
+                        range(len(self.trap_locations[self.current_position]))]
 
-        tiles_shapes = [get_tile_shapes(x) for x in trap_centers]
+        max_shape = (self.raw_expt.shape[2], self.raw_expt.shape[3])
+        tiles_shapes = [get_tile_shapes(x, tile_size, max_shape)
+                        for x in trap_centers]
 
         timelapse = [self.raw_expt[channels, i, xmin:xmax, ymin:ymax, z]
                      for i, (xmin, xmax, ymin, ymax) in enumerate(tiles_shapes)]
         return np.hstack(timelapse)
+
+    def get_traps_timepoint(self, tp, tile_size=96, channels=None, z=None):
+        """
+        Get all the traps from a given timepoint
+        :param tp: Which timepoint to fetch, indexed from 0
+        :param tile_size: The size of the trap tile
+        :param channels: Which channels to fetch; if None, defaults to [0]
+        :param z: Which z_stacks to fetch; if None, defaults to [0]
+        :return: A numpy array with the traps in the (trap, C, T, X, Y,
+        Z) order
+        """
+        # Set the defaults (list is mutable)
+        channels = channels if channels is not None else [0]
+        z = z if z is not None else [0]
+        traps = []
+        max_shape = (self.raw_expt.shape[2], self.raw_expt.shape[3])
+        # Trap centres for the requested timepoint at the current position
+        for trap_center in self.trap_locations[self.current_position][tp]:
+            xmin, xmax, ymin, ymax = get_tile_shapes(trap_center, tile_size,
+                                                     max_shape)
+            traps.append(self.raw_expt[channels, tp, xmin:xmax, ymin:ymax, z])
+        return np.stack(traps)
+
diff --git a/core/traps.py b/core/traps.py
index 61bdbb9b84890edd88496014a4fcb894f7e5ae9f..ff9d9b797eb273908cf8636e736e6f3a64f05863 100644
--- a/core/traps.py
+++ b/core/traps.py
@@ -2,7 +2,6 @@
 A set of utilities for dealing with ALCATRAS traps
 """
 
-# TODO portfolio of trap templates
 import numpy as np
 
 from skimage import transform, feature
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7c2e9401c68450402d6ea4df2f647c2fe7028fe4
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,7 @@
+imageio==2.8.0
+numpy
+omero-py==5.6.2
+pandas
+scikit-image==0.16.2
+tqdm
+zeroc-ice>=3.6.0
diff --git a/setup.py b/setup.py
index 87fb9d3c88b251775ca9f1f01ca50057ebeac21d..3b9e2ff3f7cd01882def63ce24003a8569e0f9e1 100644
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,6 @@ setup(
     include_package_data=True,
     install_requires=[
        'numpy',
-        'pandas',
        'tqdm',
        'scikit-image==0.16.2',
        'imageio==2.8.0',
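
Taken together, the changes above give `SegmentedExperiment` a position-aware API. Below is a minimal usage sketch of how the new pieces fit; it assumes an `Experiment` instance `expt` already exists (created as in the README), and the 'Brightfield' channel name is only illustrative:
```python
from core.segment import SegmentedExperiment

seg_expt = SegmentedExperiment(expt)  # tiles the current position on init

# Switch to another position; a position is only tiled the first time
# it is selected
seg_expt.current_position = seg_expt.positions[-1]

ch_id = seg_expt.get_channel_index('Brightfield')
n_traps = seg_expt.n_traps

# (C, T, X, Y, Z) stack for a single trap over the whole timelapse
trap_stack = seg_expt.get_trap_timelapse(0, tile_size=96,
                                         channels=[ch_id], z=[0])

# (trap, C, T, X, Y, Z) stack of every trap at one timepoint
traps_t0 = seg_expt.get_traps_timepoint(0, tile_size=96,
                                        channels=[ch_id], z=[0])
```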
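Since `get_tile_shapes` is now a module-level helper, its edge handling can be sanity-checked on its own. A small sketch, assuming the package (and its bundled trap template) is importable; the image shape and trap centre below are made up:
```python
from core.segment import get_tile_shapes

# A trap centre near the image border still yields a full-size tile:
# the tile is shifted inwards rather than truncated.
xmin, xmax, ymin, ymax = get_tile_shapes((500, 20), tile_size=96,
                                         max_shape=(512, 512))
assert xmax - xmin == 96 and ymax - ymin == 96
print(xmin, xmax, ymin, ymax)  # 416 512 0 96
```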