From 3ba6307516ceca8f8c169d4dc0c210e0cc795e96 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Al=C3=A1n=20Mu=C3=B1oz?= <amuoz@ed.ac.uk>
Date: Fri, 17 Dec 2021 11:36:17 +0000
Subject: [PATCH] rename and cleanup

---
 pcore/__init__.py                   |   0
 pcore/baby_client.py                | 268 -------------
 pcore/cells.py                      | 325 ----------------
 pcore/core.py                       |  34 --
 pcore/experiment.py                 | 499 ------------------------
 pcore/extract.py                    | 279 --------------
 pcore/grouper.py                    | 175 ---------
 pcore/haystack.py                   |  97 -----
 pcore/io/__init__.py                |   0
 pcore/io/base.py                    | 142 -------
 pcore/io/matlab.py                  | 569 ----------------------------
 pcore/io/metadata_parser.py         |  77 ----
 pcore/io/omero.py                   | 133 -------
 pcore/io/signal.py                  | 234 ------------
 pcore/io/utils.py                   |  44 ---
 pcore/io/writer.py                  | 567 ---------------------------
 pcore/multiexperiment.py            |  25 --
 pcore/pipeline.py                   | 271 -------------
 pcore/post_processing.py            | 189 ---------
 pcore/results.py                    |  35 --
 pcore/segment.py                    | 344 -----------------
 pcore/tests/__init__.py             |   0
 pcore/tests/test_integration.py     |  28 --
 pcore/tests/test_units.py           |  99 -----
 pcore/timelapse.py                  | 427 ---------------------
 pcore/trap_templates/trap_bg_1.npy  | Bin 8578 -> 0 bytes
 pcore/trap_templates/trap_bm_1.npy  | Bin 8578 -> 0 bytes
 pcore/trap_templates/trap_bm_2.npy  | Bin 8708 -> 0 bytes
 pcore/trap_templates/trap_prime.npy | Bin 27506 -> 0 bytes
 pcore/traps.py                      | 480 -----------------------
 pcore/utils.py                      | 135 -------
 setup.py                            |   4 +-
 32 files changed, 2 insertions(+), 5478 deletions(-)
 delete mode 100644 pcore/__init__.py
 delete mode 100644 pcore/baby_client.py
 delete mode 100644 pcore/cells.py
 delete mode 100644 pcore/core.py
 delete mode 100644 pcore/experiment.py
 delete mode 100644 pcore/extract.py
 delete mode 100644 pcore/grouper.py
 delete mode 100644 pcore/haystack.py
 delete mode 100644 pcore/io/__init__.py
 delete mode 100644 pcore/io/base.py
 delete mode 100644 pcore/io/matlab.py
 delete mode 100644 pcore/io/metadata_parser.py
 delete mode 100644 pcore/io/omero.py
 delete mode 100644 pcore/io/signal.py
 delete mode 100644 pcore/io/utils.py
 delete mode 100644 pcore/io/writer.py
 delete mode 100644 pcore/multiexperiment.py
 delete mode 100644 pcore/pipeline.py
 delete mode 100644 pcore/post_processing.py
 delete mode 100644 pcore/results.py
 delete mode 100644 pcore/segment.py
 delete mode 100644 pcore/tests/__init__.py
 delete mode 100644 pcore/tests/test_integration.py
 delete mode 100644 pcore/tests/test_units.py
 delete mode 100644 pcore/timelapse.py
 delete mode 100644 pcore/trap_templates/trap_bg_1.npy
 delete mode 100644 pcore/trap_templates/trap_bm_1.npy
 delete mode 100644 pcore/trap_templates/trap_bm_2.npy
 delete mode 100644 pcore/trap_templates/trap_prime.npy
 delete mode 100644 pcore/traps.py
 delete mode 100644 pcore/utils.py

diff --git a/pcore/__init__.py b/pcore/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/pcore/baby_client.py b/pcore/baby_client.py
deleted file mode 100644
index ad9ff070..00000000
--- a/pcore/baby_client.py
+++ /dev/null
@@ -1,268 +0,0 @@
-import collections
-import itertools
-import json
-import time
-from pathlib import Path
-from typing import Iterable
-
-import h5py
-import numpy as np
-import pandas as pd
-import re
-import requests
-import tensorflow as tf
-from tqdm import tqdm
-
-from agora.base import ParametersABC, ProcessABC
-import baby.errors
-from baby import modelsets
-from baby.brain import BabyBrain
-from baby.crawler import BabyCrawler
-from requests.exceptions import Timeout, HTTPError
-from requests_toolbelt.multipart.encoder import MultipartEncoder
-
-from pcore.utils import Cache, accumulate, get_store_path
-
-
-################### Dask Methods ################################
-def format_segmentation(segmentation, tp):
-    """Format a single timepoint into a dictionary.
-
-    Parameters
-    ------------
-    segmentation: list
-                  A list of results; each result is the JSON-encoded output of the crawler
-    tp: int
-        the time point considered
-
-    Returns
-    --------
-    A dictionary containing the formatted results of BABY
-    """
-    # Segmentation is a list of dictionaries, ordered by trap
-    # Add trap information
-    mother_assign = None
-    for i, x in enumerate(segmentation):
-        x["trap"] = [i] * len(x["cell_label"])
-        x["mother_assign_dynamic"] = np.array(x["mother_assign"])[
-            np.array(x["cell_label"], dtype=int) - 1
-        ]
-    # Merge into a dictionary of lists, by column
-    merged = {
-        k: list(itertools.chain.from_iterable(res[k] for res in segmentation))
-        for k in segmentation[0].keys()
-    }
-    # Special case for mother_assign
-    # merged["mother_assign_dynamic"] = [merged["mother_assign"]]
-    if "mother_assign" in merged:
-        del merged["mother_assign"]
-        mother_assign = [x["mother_assign"] for x in segmentation]
-    # Check that the lists are all of the same length (in case of errors in
-    # BABY)
-    n_cells = min([len(v) for v in merged.values()])
-    merged = {k: v[:n_cells] for k, v in merged.items()}
-    merged["timepoint"] = [tp] * n_cells
-    merged["mother_assign"] = mother_assign
-    return merged
-
-
-class BabyParameters(ParametersABC):
-    def __init__(
-        self,
-        model_config,
-        tracker_params,
-        clogging_thresh,
-        min_bud_tps,
-        isbud_thresh,
-        session,
-        graph,
-        print_info,
-        suppress_errors,
-        error_dump_dir,
-        tf_version,
-    ):
-        self.model_config = model_config
-        self.tracker_params = tracker_params
-        self.clogging_thresh = clogging_thresh
-        self.min_bud_tps = min_bud_tps
-        self.isbud_thresh = isbud_thresh
-        self.session = session
-        self.graph = graph
-        self.print_info = print_info
-        self.suppress_errors = suppress_errors
-        self.error_dump_dir = error_dump_dir
-        self.tf_version = tf_version
-
-    @classmethod
-    def default(cls, **kwargs):
-        """kwargs passes values to the model chooser"""
-        return cls(
-            model_config=choose_model_from_params(**kwargs),
-            tracker_params=dict(ctrack_params=dict(), budtrack_params=dict()),
-            clogging_thresh=1,
-            min_bud_tps=3,
-            isbud_thresh=0.5,
-            session=None,
-            graph=None,
-            print_info=False,
-            suppress_errors=False,
-            error_dump_dir=None,
-            tf_version=2,
-        )
-
-
-class BabyRunner:
-    """A BabyRunner object for cell segmentation.
-
-    Does segmentation one time point at a time."""
-
-    def __init__(self, tiler, parameters=None, *args, **kwargs):
-        self.tiler = tiler
-        # self.model_config = modelsets()[choose_model_from_params(**kwargs)]
-        self.model_config = modelsets()[
-            (
-                parameters.model_config
-                if parameters is not None
-                else choose_model_from_params(**kwargs)
-            )
-        ]
-        self.brain = BabyBrain(**self.model_config)
-        self.crawler = BabyCrawler(self.brain)
-        self.bf_channel = self.tiler.get_channel_index("Brightfield")
-
-    @classmethod
-    def from_tiler(cls, parameters: BabyParameters, tiler):
-        return cls(tiler, parameters)
-
-    def get_data(self, tp):
-        # Swap axes x and z, probably shouldn't swap, just move z
-        return self.tiler.get_tp_data(tp, self.bf_channel).swapaxes(1, 3).swapaxes(1, 2)
-
-    def run_tp(self, tp, with_edgemasks=True, assign_mothers=True, **kwargs):
-        """Simulating processing time with sleep"""
-        # Access the image
-        img = self.get_data(tp)
-        segmentation = self.crawler.step(
-            img, with_edgemasks=with_edgemasks, assign_mothers=assign_mothers, **kwargs
-        )
-        return format_segmentation(segmentation, tp)
-
-
-class BabyClient:
-    """A dummy BabyClient object for Dask Demo.
-
-
-    Does segmentation one time point at a time.
-    Should work better with the parallelisation.
-    """
-
-    bf_channel = 0
-    model_name = "prime95b_brightfield_60x_5z"
-    url = "http://localhost:5101"
-    max_tries = 50
-    sleep_time = 0.1
-
-    def __init__(self, tiler, *args, **kwargs):
-        self.tiler = tiler
-        self._session = None
-
-    @property
-    def session(self):
-        if self._session is None:
-            r_session = requests.get(self.url + f"/session/{self.model_name}")
-            r_session.raise_for_status()
-            self._session = r_session.json()["sessionid"]
-        return self._session
-
-    def get_data(self, tp):
-        return self.tiler.get_tp_data(tp, self.bf_channel).swapaxes(1, 3)
-
-    def queue_image(self, img, **kwargs):
-        bit_depth = img.dtype.itemsize * 8  # bit depth =  byte_size * 8
-        data = create_request(img.shape, bit_depth, img, **kwargs)
-        status = requests.post(
-            self.url + f"/segment?sessionid={self.session}",
-            data=data,
-            headers={"Content-Type": data.content_type},
-        )
-        status.raise_for_status()
-        return status
-
-    def get_segmentation(self):
-        try:
-            seg_response = requests.get(
-                self.url + f"/segment?sessionid={self.session}", timeout=120
-            )
-            seg_response.raise_for_status()
-            result = seg_response.json()
-        except Timeout as e:
-            raise e
-        except HTTPError as e:
-            raise e
-        return result
-
-    def run_tp(self, tp, **kwargs):
-        # Get data
-        img = self.get_data(tp)
-        # Queue image
-        status = self.queue_image(img, **kwargs)
-        # Get segmentation
-        for _ in range(self.max_tries):
-            try:
-                seg = self.get_segmentation()
-                break
-            except (Timeout, HTTPError):
-                time.sleep(self.sleep_time)
-                continue
-        return format_segmentation(seg, tp)
-
-
-def choose_model_from_params(
-    modelset_filter=None,
-    camera="prime95b",
-    channel="brightfield",
-    zoom="60x",
-    n_stacks="5z",
-    **kwargs,
-):
-    """
-    Define which model to query from the server based on a set of parameters.
-
-    Parameters
-    ----------
-    valid_models: List[str]
-                  The names of the models that are available.
-    modelset_filter: str
-                    A regex filter to apply on the models to start.
-    camera: str
-            The camera used in the experiment (case insensitive).
-    channel:str
-            The channel used for segmentation (case insensitive).
-    zoom: str
-          The zoom on the channel.
-    n_stacks: str
-              The number of z_stacks to use in segmentation
-
-    Returns
-    -------
-    model_name : str
-    """
-    valid_models = list(modelsets().keys())
-
-    # Apply modelset filter if specified
-    if modelset_filter is not None:
-        msf_regex = re.compile(modelset_filter)
-        valid_models = filter(msf_regex.search, valid_models)
-
-    # Apply parameter filters if specified
-    params = [
-        str(x) if x is not None else ".+"
-        for x in [camera.lower(), channel.lower(), zoom, n_stacks]
-    ]
-    params_re = re.compile("^" + "_".join(params) + "$")
-    valid_models = list(filter(params_re.search, valid_models))
-    # Check that there are valid models
-    if len(valid_models) == 0:
-        raise KeyError("No model sets found matching {}".format(", ".join(params)))
-    # Pick the first model
-    return valid_models[0]
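
The deleted choose_model_from_params picks a BABY model set by composing a regex of the form "^<camera>_<channel>_<zoom>_<n_stacks>$" and returning the first matching name. A minimal, self-contained sketch of that selection logic; the model names below are hypothetical placeholders rather than real baby.modelsets() keys:

    import re

    def pick_model(names, camera="prime95b", channel="brightfield", zoom="60x", n_stacks="5z"):
        # Parameters left as None match anything, as in the deleted code
        params = [x.lower() if isinstance(x, str) else ".+" for x in (camera, channel, zoom, n_stacks)]
        pattern = re.compile("^" + "_".join(params) + "$")
        matches = [n for n in names if pattern.search(n)]
        if not matches:
            raise KeyError("No model sets found matching " + ", ".join(params))
        return matches[0]

    print(pick_model(["prime95b_brightfield_60x_5z", "evolve_gfp_60x_1z"]))
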
diff --git a/pcore/cells.py b/pcore/cells.py
deleted file mode 100644
index e9c14b8a..00000000
--- a/pcore/cells.py
+++ /dev/null
@@ -1,325 +0,0 @@
-import logging
-from pathlib import Path, PosixPath
-from time import perf_counter
-from typing import Union
-from itertools import groupby
-from collections.abc import Iterable
-
-from utils_find_1st import find_1st, cmp_equal
-import h5py
-import numpy as np
-from scipy import ndimage
-from scipy.sparse.base import isdense
-
-from pcore.io.matlab import matObject
-from pcore.utils import timed
-from pcore.io.writer import load_complex
-
-
-def cell_factory(store, type="matlab"):
-    if isinstance(store, matObject):
-        return CellsMat(store)
-    if type == "matlab":
-        mat_object = matObject(store)
-        return CellsMat(mat_object)
-    elif type == "hdf5":
-        return CellsHDF(store)
-    else:
-        raise TypeError(
-            "Could not get cells for type {}:" "valid types are matlab and hdf5"
-        )
-
-
-class Cells:
-    """An object that gathers information about all the cells in a given
-    trap.
-    This is the abstract object, used for type testing
-    """
-
-    def __init__(self):
-        pass
-
-    @staticmethod
-    def from_source(source: Union[PosixPath, str], kind: str = None):
-        if isinstance(source, str):
-            source = Path(source)
-        if kind is None:  # Infer kind from filename
-            kind = "matlab" if source.suffix == ".mat" else "hdf5"
-        return cell_factory(source, kind)
-
-    @staticmethod
-    def _asdense(array):
-        if not isdense(array):
-            array = array.todense()
-        return array
-
-    @staticmethod
-    def _astype(array, kind):
-        # Convert sparse arrays if needed; if kind is 'mask', fill in the outline
-        array = Cells._asdense(array)
-        if kind == "mask":
-            array = ndimage.binary_fill_holes(array).astype(int)
-        return array
-
-    @classmethod
-    def hdf(cls, fpath):
-        return CellsHDF(fpath)
-
-    @classmethod
-    def mat(cls, path):
-        return CellsMat(matObject(path))
-
-
-class CellsHDF(Cells):
-    def __init__(self, filename, path="cell_info"):
-        self.filename = filename
-        self.cinfo_path = path
-        self._edgem_indices = None
-        self._edgemasks = None
-        self._tile_size = None
-
-    def __getitem__(self, item):
-        if item == "edgemasks":
-            return self.edgemasks
-        _item = "_" + item
-        if not hasattr(self, _item):
-            setattr(self, _item, self._fetch(item))
-        return getattr(self, _item)
-
-    def _get_idx(self, cell_id, trap_id):
-        return (self["cell_label"] == cell_id) & (self["trap"] == trap_id)
-
-    def _fetch(self, path):
-        with h5py.File(self.filename, mode="r") as f:
-            return f[self.cinfo_path][path][()]
-
-    @property
-    def ntraps(self):
-        with h5py.File(self.filename, mode="r") as f:
-            return len(f["/trap_info/trap_locations"][()])
-
-    @property
-    def traps(self):
-        return list(set(self["trap"]))
-
-    @property
-    def tile_size(self):  # TODO read from metadata
-        if self._tile_size is None:
-            with h5py.File(self.filename, mode="r") as f:
-                self._tile_size = f["trap_info/tile_size"][0]
-        return self._tile_size
-
-    @property
-    def edgem_indices(self):
-        if self._edgem_indices is None:
-            edgem_path = "edgemasks/indices"
-            self._edgem_indices = load_complex(self._fetch(edgem_path))
-        return self._edgem_indices
-
-    @property
-    def edgemasks(self):
-        if self._edgemasks is None:
-            edgem_path = "edgemasks/values"
-            self._edgemasks = self._fetch(edgem_path)
-
-        return self._edgemasks
-
-    def _edgem_where(self, cell_id, trap_id):
-        ix = trap_id + 1j * cell_id
-        return find_1st(self.edgem_indices == ix, True, cmp_equal)
-
-    @property
-    def labels(self):
-        """
-        Return all cell labels in the object.
-        We use mother_assign to list traps because it is the only property that appears even
-        when no cells are found."""
-        return [self.labels_in_trap(trap) for trap in self.traps]
-
-    def where(self, cell_id, trap_id):
-        """
-        Parameters
-        ----------
-            cell_id: int
-                Cell index
-            trap_id: int
-                Trap index
-
-        Returns
-        -------
-            times: int array
-            indices: boolean mask array
-            edgem_ix: int
-        """
-        indices = self._get_idx(cell_id, trap_id)
-        edgem_ix = self._edgem_where(cell_id, trap_id)
-        return (
-            self["timepoint"][indices],
-            indices,
-            edgem_ix,
-        )  # FIXME edgem_ix makes output different to matlab's Cell
-
-    def outline(self, cell_id, trap_id):
-        times, indices, cell_ix = self.where(cell_id, trap_id)
-        return times, self["edgemasks"][cell_ix, times]
-
-    def mask(self, cell_id, trap_id):
-        times, outlines = self.outline(cell_id, trap_id)
-        return times, np.array(
-            [ndimage.morphology.binary_fill_holes(o) for o in outlines]
-        )
-
-    def at_time(self, timepoint, kind="mask"):
-        ix = self["timepoint"] == timepoint
-        cell_ix = self["cell_label"][ix]
-        traps = self["trap"][ix]
-        indices = traps + 1j * cell_ix
-        choose = np.in1d(self.edgem_indices, indices)
-        edgemasks = self["edgemasks"][choose, timepoint]
-        masks = [
-            self._astype(edgemask, kind) for edgemask in edgemasks if edgemask.any()
-        ]
-        return self.group_by_traps(traps, masks)
-
-    def group_by_traps(self, traps, data):
-        # Returns a dict with trap ids as keys and the grouped data as values
-        iterator = groupby(zip(traps, data), lambda x: x[0])
-        d = {key: [x[1] for x in group] for key, group in iterator}
-        d = {i: d.get(i, []) for i in self.traps}
-        return d
-
-    def labels_in_trap(self, trap_id):
-        # Return set of cell ids in a trap.
-        return set((self["cell_label"][self["trap"] == trap_id]))
-
-    def labels_at_time(self, timepoint):
-        labels = self["cell_label"][self["timepoint"] == timepoint]
-        traps = self["trap"][self["timepoint"] == timepoint]
-        return self.group_by_traps(traps, labels)
-
-
-class CellsMat(Cells):
-    def __init__(self, mat_object):
-        super(CellsMat, self).__init__()
-        # TODO add __contains__ to the matObject
-        timelapse_traps = mat_object.get(
-            "timelapseTrapsOmero", mat_object.get("timelapseTraps", None)
-        )
-        if timelapse_traps is None:
-            raise NotImplementedError(
-                "Could not find a timelapseTraps or "
-                "timelapseTrapsOmero object. Cells "
-                "from cellResults not implemented"
-            )
-        else:
-            self.trap_info = timelapse_traps["cTimepoint"]["trapInfo"]
-
-            if isinstance(self.trap_info, list):
-                self.trap_info = {
-                    k: list([res.get(k, []) for res in self.trap_info])
-                    for k in self.trap_info[0].keys()
-                }
-
-    def where(self, cell_id, trap_id):
-        times, indices = zip(
-            *[
-                (tp, np.where(cell_id == x)[0][0])
-                for tp, x in enumerate(self.trap_info["cellLabel"][:, trap_id].tolist())
-                if np.any(cell_id == x)
-            ]
-        )
-        return times, indices
-
-    def outline(self, cell_id, trap_id):
-        times, indices = self.where(cell_id, trap_id)
-        info = self.trap_info["cell"][times, trap_id]
-
-        def get_segmented(cell, index):
-            if cell["segmented"].ndim == 0:
-                return cell["segmented"][()].todense()
-            else:
-                return cell["segmented"][index].todense()
-
-        segmentation_outline = [
-            get_segmented(cell, idx) for idx, cell in zip(indices, info)
-        ]
-        return times, np.array(segmentation_outline)
-
-    def mask(self, cell_id, trap_id):
-        times, outlines = self.outline(cell_id, trap_id)
-        return times, np.array(
-            [ndimage.morphology.binary_fill_holes(o) for o in outlines]
-        )
-
-    def at_time(self, timepoint, kind="outline"):
-
-        """Returns the segmentations for all the cells at a given timepoint.
-
-        FIXME: this is extremely hacky and accounts for differently saved
-            results in the matlab object. Deprecate ASAP.
-        """
-        # Case 1: only one cell per trap: trap_info['cell'][timepoint] is a
-        # structured array
-        if isinstance(self.trap_info["cell"][timepoint], dict):
-            segmentations = [
-                self._astype(x, "outline")
-                for x in self.trap_info["cell"][timepoint]["segmented"]
-            ]
-        # Case 2: Multiple cells per trap: it becomes a list of arrays or
-        # dictionaries,  one for each trap
-        # Case 2.1 : it's a dictionary
-        elif isinstance(self.trap_info["cell"][timepoint][0], dict):
-            segmentations = []
-            for x in self.trap_info["cell"][timepoint]:
-                seg = x["segmented"]
-                if not isinstance(seg, np.ndarray):
-                    seg = [seg]
-                segmentations.append([self._astype(y, "outline") for y in seg])
-        # Case 2.2 : it's an array
-        else:
-            segmentations = [
-                [self._astype(y, "outline") for y in x["segmented"]] if x.ndim != 0 else []
-                for x in self.trap_info["cell"][timepoint]
-            ]
-            # Return dict for compatibility with hdf5 output
-        return {i: v for i, v in enumerate(segmentations)}
-
-    def labels_at_time(self, tp):
-        labels = self.trap_info["cellLabel"]
-        labels = [_aslist(x) for x in labels[tp]]
-        labels = {i: [lbl for lbl in lblset] for i, lblset in enumerate(labels)}
-        return labels
-
-    @property
-    def ntraps(self):
-        return len(self.trap_info["cellLabel"][0])
-
-    @property
-    def tile_size(self):
-        pass
-
-
-class ExtractionRunner:
-    """An object to run extraction of fluorescence, and general data out of
-    segmented data.
-
-    Configure with what extraction we want to run.
-    Cell selection criteria.
-    Filtering criteria.
-    """
-
-    def __init__(self, tiler, cells):
-        pass
-
-    def run(self, keys, store, **kwargs):
-        pass
-
-
-def _aslist(x):
-    if isinstance(x, Iterable):
-        if hasattr(x, "tolist"):
-            x = x.tolist()
-    else:
-        x = [x]
-    return x
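
The deleted CellsHDF locates a cell's edgemasks by packing each (trap, cell_label) pair into one complex number, trap + 1j * cell_label, so a lookup becomes a single array comparison rather than a two-column match. A small illustrative sketch of the same indexing trick, using made-up NumPy arrays:

    import numpy as np

    # One packed index per stored edgemask row: trap + 1j * cell_label
    edgem_indices = np.array([0 + 1j * 1, 0 + 1j * 2, 3 + 1j * 1])

    def edgem_where(cell_id, trap_id):
        # Index of the first row whose packed value matches the requested pair
        return int(np.argmax(edgem_indices == trap_id + 1j * cell_id))

    print(edgem_where(1, 3))  # -> 2
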
diff --git a/pcore/core.py b/pcore/core.py
deleted file mode 100644
index 56422434..00000000
--- a/pcore/core.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""Barebones implementation of the structure/organisation of experiments."""
-
-
-class Experiment:
-    def __init__(self):
-        self.strains = dict()
-        self._metadata = None
-
-    def add_strains(self, name, strain):
-        self.strains[name] = strain
-
-
-class Strain:
-    def __init__(self):
-        self.positions = dict()
-
-    def add_position(self, name, position):
-        self.positions[name] = position
-
-
-class Position:
-    def __init__(self):
-        self.traps = []
-
-    def add_trap(self, trap):
-        self.traps.append(trap)
-
-
-class Trap:  # TODO Name this Tile?
-    def __init__(self):
-        self.cells = []
-
-    def add_cell(self, cell):
-        self.cells.append(cell)
diff --git a/pcore/experiment.py b/pcore/experiment.py
deleted file mode 100644
index c8b07fef..00000000
--- a/pcore/experiment.py
+++ /dev/null
@@ -1,499 +0,0 @@
-"""Core classes for the pipeline"""
-import atexit
-import itertools
-import os
-import abc
-import glob
-import json
-import warnings
-from getpass import getpass
-from pathlib import Path
-import re
-import logging
-from typing import Union
-
-import h5py
-from tqdm import tqdm
-import pandas as pd
-
-import omero
-from omero.gateway import BlitzGateway
-from logfile_parser import Parser
-
-from pcore.timelapse import TimelapseOMERO, TimelapseLocal
-from pcore.utils import accumulate
-
-from pcore.io.writer import Writer
-
-logger = logging.getLogger(__name__)
-
-########################### Dask objects ###################################
-##################### ENVIRONMENT INITIALISATION ################
-import omero
-from omero.gateway import BlitzGateway, PixelsWrapper
-from omero.model import enums as omero_enums
-import numpy as np
-
-# Set up the pixels so that we can reuse them across sessions (?)
-PIXEL_TYPES = {
-    omero_enums.PixelsTypeint8: np.int8,
-    omero_enums.PixelsTypeuint8: np.uint8,
-    omero_enums.PixelsTypeint16: np.int16,
-    omero_enums.PixelsTypeuint16: np.uint16,
-    omero_enums.PixelsTypeint32: np.int32,
-    omero_enums.PixelsTypeuint32: np.uint32,
-    omero_enums.PixelsTypefloat: np.float32,
-    omero_enums.PixelsTypedouble: np.float64,
-}
-
-
-class NonCachedPixelsWrapper(PixelsWrapper):
-    """Extend gateway.PixelWrapper to override _prepareRawPixelsStore."""
-
-    def _prepareRawPixelsStore(self):
-        """
-        Creates RawPixelsStore and sets the id etc
-        This overrides the superclass behaviour to make sure that
-        we don't re-use RawPixelStore in multiple processes since
-        the Store may be closed in 1 process while still needed elsewhere.
-        This is needed when napari requests many planes simultaneously,
-        e.g. when switching to 3D view.
-        """
-        ps = self._conn.c.sf.createRawPixelsStore()
-        ps.setPixelsId(self._obj.id.val, True, self._conn.SERVICE_OPTS)
-        return ps
-
-
-omero.gateway.PixelsWrapper = NonCachedPixelsWrapper
-# Update the BlitzGateway to use our NonCachedPixelsWrapper
-omero.gateway.refreshWrappers()
-
-
-######################  DATA ACCESS ###################
-import dask.array as da
-from dask import delayed
-
-
-def get_data_lazy(image) -> da.Array:
-    """Get 5D dask array, with delayed reading from OMERO image."""
-    nt, nc, nz, ny, nx = [getattr(image, f"getSize{x}")() for x in "TCZYX"]
-    pixels = image.getPrimaryPixels()
-    dtype = PIXEL_TYPES.get(pixels.getPixelsType().value, None)
-    get_plane = delayed(lambda idx: pixels.getPlane(*idx))
-
-    def get_lazy_plane(zct):
-        return da.from_delayed(get_plane(zct), shape=(ny, nx), dtype=dtype)
-
-    # 5D stack: TCZXY
-    t_stacks = []
-    for t in range(nt):
-        c_stacks = []
-        for c in range(nc):
-            z_stack = []
-            for z in range(nz):
-                z_stack.append(get_lazy_plane((z, c, t)))
-            c_stacks.append(da.stack(z_stack))
-        t_stacks.append(da.stack(c_stacks))
-    return da.stack(t_stacks)
-
-
-# Metadata writer
-from pcore.io.metadata_parser import parse_logfiles
-
-
-class MetaData:
-    """Small metadata Process that loads log."""
-
-    def __init__(self, log_dir, store):
-        self.log_dir = log_dir
-        self.store = store
-
-    def load_logs(self):
-        parsed_flattened = parse_logfiles(self.log_dir)
-        return parsed_flattened
-
-    def run(self):
-        metadata_writer = Writer(self.store)
-        metadata_dict = self.load_logs()
-        metadata_writer.write(path="/", meta=metadata_dict, overwrite=False)
-
-
-########################### Old Objects ####################################
-
-
-class Experiment(abc.ABC):
-    """
-    Abstract base class for experiments.
-    Gives all the functions that need to be implemented in both the local
-    version and the Omero version of the Experiment class.
-
-    As this is an abstract class, experiments cannot be instantiated directly
-    through the usual `__init__` function, but must be constructed from a
-    source.
-    >>> expt = Experiment.from_source(root_directory)
-    Data from the current timelapse can be obtained from the experiment using
-    colon and comma separated slicing.
-    The order of data is C, T, X, Y, Z.
-    C, T and Z can have any slice.
-    X and Y will only consider the beginning and end, as we want the images
-    to be continuous.
-    >>> bf_1 = expt[0, 0, :, :, :] # First channel, first timepoint, all x,y,z
-    """
-
-    __metaclass__ = abc.ABCMeta
-
-    # metadata_parser = AcqMetadataParser()
-
-    def __init__(self):
-        self.exptID = ""
-        self._current_position = None
-        self.position_to_process = 0
-
-    def __getitem__(self, item):
-        return self.current_position[item]
-
-    @property
-    def shape(self):
-        return self.current_position.shape
-
-    @staticmethod
-    def from_source(*args, **kwargs):
-        """
-        Factory method to construct an instance of an Experiment subclass (
-        either ExperimentOMERO or ExperimentLocal).
-
-        :param source: Where the data is stored (OMERO server or directory
-        name)
-        :param kwargs: If using an OMERO server, `username` and `password` keyword
-        arguments are required. If the data is stored locally, keyword
-        arguments are ignored.
-        """
-        if len(args) > 1:
-            logger.debug("ExperimentOMERO: {}".format(args, kwargs))
-            return ExperimentOMERO(*args, **kwargs)
-        else:
-            logger.debug("ExperimentLocal: {}".format(args, kwargs))
-            return ExperimentLocal(*args, **kwargs)
-
-    @property
-    @abc.abstractmethod
-    def positions(self):
-        """Returns list of available position names"""
-        return
-
-    @abc.abstractmethod
-    def get_position(self, position):
-        return
-
-    @property
-    def current_position(self):
-        return self._current_position
-
-    @property
-    def channels(self):
-        return self._current_position.channels
-
-    @current_position.setter
-    def current_position(self, position):
-        self._current_position = self.get_position(position)
-
-    def get_hypercube(self, x, y, z_positions, channels, timepoints):
-        return self.current_position.get_hypercube(
-            x, y, z_positions, channels, timepoints
-        )
-
-
-# Todo: cache images like in ExperimentLocal
-class ExperimentOMERO(Experiment):
-    """
-    Experiment class to organise different timelapses.
-    Connected to a Dataset object which handles database I/O.
-    """
-
-    def __init__(self, omero_id, host, port=4064, **kwargs):
-        super(ExperimentOMERO, self).__init__()
-        self.exptID = omero_id
-        # Get annotations
-        self.use_annotations = kwargs.get("use_annotations", True)
-        self._files = None
-        self._tags = None
-
-        # Create a connection
-        self.connection = BlitzGateway(
-            kwargs.get("username") or input("Username: "),
-            kwargs.get("password") or getpass("Password: "),
-            host=host,
-            port=port,
-        )
-        connected = self.connection.connect()
-        try:
-            assert connected is True, "Could not connect to server."
-        except AssertionError as e:
-            self.connection.close()
-            raise (e)
-        try:  # Run everything that could cause the initialisation to fail
-            self.dataset = self.connection.getObject("Dataset", self.exptID)
-            self.name = self.dataset.getName()
-            # Create positions objects
-            self._positions = {
-                img.getName(): img.getId()
-                for img in sorted(
-                    self.dataset.listChildren(), key=lambda x: x.getName()
-                )
-            }
-            # Set up local cache
-            self.root_dir = Path(kwargs.get("save_dir", "./")) / self.name
-            if not self.root_dir.exists():
-                self.root_dir.mkdir(parents=True)
-            self.compression = kwargs.get("compression", None)
-            self.image_cache = h5py.File(self.root_dir / "images.h5", "a")
-
-            # Set up the current position as the first in the list
-            self._current_position = self.get_position(self.positions[0])
-            self.running_tp = 0
-        except Exception as e:
-            # Close the connection!
-            print("Error in initialisation, closing connection.")
-            self.connection.close()
-            print(self.connection.isConnected())
-            raise e
-        atexit.register(self.close)  # Close everything if program ends
-
-    def close(self):
-        print("Clean-up on exit.")
-        self.image_cache.close()
-        self.connection.close()
-
-    @property
-    def files(self):
-        if self._files is None:
-            self._files = {
-                x.getFileName(): x
-                for x in self.dataset.listAnnotations()
-                if isinstance(x, omero.gateway.FileAnnotationWrapper)
-            }
-        return self._files
-
-    @property
-    def tags(self):
-        if self._tags is None:
-            self._tags = {
-                x.getName(): x
-                for x in self.dataset.listAnnotations()
-                if isinstance(x, omero.gateway.TagAnnotationWrapper)
-            }
-        return self._tags
-
-    @property
-    def positions(self):
-        return list(self._positions.keys())
-
-    def _get_position_annotation(self, position):
-        # Get file annotations filtered by position name and ordered by
-        # creation date
-        r = re.compile(position)
-        wrappers = sorted(
-            [self.files[key] for key in filter(r.match, self.files)],
-            key=lambda x: x.creationEventDate(),
-            reverse=True,
-        )
-        # Choose newest file
-        if len(wrappers) < 1:
-            return None
-        else:
-            # Choose the newest annotation and cache it
-            annotation = wrappers[0]
-            filepath = self.root_dir / annotation.getFileName().replace("/", "_")
-            if not filepath.exists():
-                with open(str(filepath), "wb") as fd:
-                    for chunk in annotation.getFileInChunks():
-                        fd.write(chunk)
-            return filepath
-
-    def get_position(self, position):
-        """Get a Timelapse object for a given position by name"""
-        # assert position in self.positions, "Position not available."
-        img = self.connection.getObject("Image", self._positions[position])
-        if self.use_annotations:
-            annotation = self._get_position_annotation(position)
-        else:
-            annotation = None
-        return TimelapseOMERO(img, annotation, self.image_cache)
-
-    def cache_locally(
-        self,
-        root_dir="./",
-        positions=None,
-        channels=None,
-        timepoints=None,
-        z_positions=None,
-    ):
-        """
-        Save the experiment locally.
-
-        :param root_dir: The directory in which the experiment will be
-        saved. The experiment will be a subdirectory of "root_dir"
-        and will be named after the experiment.
-        """
-        logger.warning("Saving experiment {}; may take some time.".format(self.name))
-
-        if positions is None:
-            positions = self.positions
-        if channels is None:
-            channels = self.current_position.channels
-        if timepoints is None:
-            timepoints = range(self.current_position.size_t)
-        if z_positions is None:
-            z_positions = range(self.current_position.size_z)
-
-        save_dir = Path(root_dir) / self.name
-        if not save_dir.exists():
-            save_dir.mkdir()
-        # Save the images
-        for pos_name in tqdm(positions):
-            pos = self.get_position(pos_name)
-            pos_dir = save_dir / pos_name
-            if not pos_dir.exists():
-                pos_dir.mkdir()
-            self.cache_set(pos, range(pos.size_t))
-
-        self.cache_logs(save_dir)
-        # Save the file annotations
-        cache_config = dict(
-            positions=positions,
-            channels=channels,
-            timepoints=timepoints,
-            z_positions=z_positions,
-        )
-        with open(str(save_dir / "cache.config"), "w") as fd:
-            json.dump(cache_config, fd)
-        logger.info("Downloaded experiment {}".format(self.exptID))
-
-    def cache_logs(self, **kwargs):
-        # Save the file annotations
-        tags = dict()  # and the tag annotations
-        for annotation in self.dataset.listAnnotations():
-            if isinstance(annotation, omero.gateway.FileAnnotationWrapper):
-                filepath = self.root_dir / annotation.getFileName().replace("/", "_")
-                if str(filepath).endswith("txt") and not filepath.exists():
-                    # Save only the text files
-                    with open(str(filepath), "wb") as fd:
-                        for chunk in annotation.getFileInChunks():
-                            fd.write(chunk)
-            if isinstance(annotation, omero.gateway.TagAnnotationWrapper):
-                key = annotation.getDescription()
-                if key == "":
-                    key = "misc. tags"
-                if key in tags:
-                    if not isinstance(tags[key], list):
-                        tags[key] = [tags[key]]
-                    tags[key].append(annotation.getValue())
-                else:
-                    tags[key] = annotation.getValue()
-        with open(str(self.root_dir / "omero_tags.json"), "w") as fd:
-            json.dump(tags, fd)
-        return
-
-    def run(self, keys: Union[list, int], store, **kwargs):
-        if self.running_tp == 0:
-            self.cache_logs(**kwargs)
-            self.running_tp = 1  # Todo rename based on annotations
-        run_tps = dict()
-        for pos, tps in accumulate(keys):
-            position = self.get_position(pos)
-            run_tps[pos] = position.run(tps, store, save_dir=self.root_dir)
-        # Update the keys to match what was actually run
-        keys = [(pos, tp) for pos in run_tps for tp in run_tps[pos]]
-        return keys
-
-
-class ExperimentLocal(Experiment):
-    def __init__(self, root_dir, finished=True):
-        super(ExperimentLocal, self).__init__()
-        self.root_dir = Path(root_dir)
-        self.exptID = self.root_dir.name
-        self._pos_mapper = dict()
-        # Fixme: Made the assumption that the Acq file gets saved before the
-        #  experiment is run and that the information in that file is
-        #  trustworthy.
-        acq_file = self._find_acq_file()
-        acq_parser = Parser("multiDGUI_acq_format")
-        with open(acq_file, "r") as fd:
-            metadata = acq_parser.parse(fd)
-        self.metadata = metadata
-        self.metadata["finished"] = finished
-        self.files = [f for f in self.root_dir.iterdir() if f.is_file()]
-        self.image_cache = h5py.File(self.root_dir / "images.h5", "a")
-        if self.finished:
-            cache = self._find_cache()
-            # log = self._find_log() # Todo: add log metadata
-            if cache is not None:
-                with open(cache, "r") as fd:
-                    cache_config = json.load(fd)
-                self.metadata.update(**cache_config)
-        self._current_position = self.get_position(self.positions[0])
-
-    def _find_file(self, regex):
-        file = glob.glob(os.path.join(str(self.root_dir), regex))
-        if len(file) != 1:
-            return None
-        else:
-            return file[0]
-
-    def _find_acq_file(self):
-        file = self._find_file("*[Aa]cq.txt")
-        if file is None:
-            raise ValueError(
-                "Cannot load this experiment. There are either "
-                "too many or too few acq files."
-            )
-        return file
-
-    def _find_cache(self):
-        return self._find_file("cache.config")
-
-    @property
-    def finished(self):
-        return self.metadata["finished"]
-
-    @property
-    def running(self):
-        return not self.metadata["finished"]
-
-    @property
-    def positions(self):
-        return self.metadata["positions"]["posname"]
-
-    def _get_position_annotation(self, position):
-        r = re.compile(position)
-        files = list(filter(lambda x: r.match(x.stem), self.files))
-        if len(files) == 0:
-            return None
-        files = sorted(files, key=lambda x: x.lstat().st_ctime, reverse=True)
-        # Get the newest and return as string
-        return files[0]
-
-    def get_position(self, position):
-        if position not in self._pos_mapper:
-            annotation = self._get_position_annotation(position)
-            self._pos_mapper[position] = TimelapseLocal(
-                position,
-                self.root_dir,
-                finished=self.finished,
-                annotation=annotation,
-                cache=self.image_cache,
-            )
-        return self._pos_mapper[position]
-
-    def run(self, keys, store, **kwargs):
-        """
-
-        :param keys: List of (position, time point) tuples to process.
-        :return:
-        """
-        run_tps = dict()
-        for pos, tps in accumulate(keys):
-            run_tps[pos] = self.get_position(pos).run(tps, store)
-        # Update the keys to match what was actually run
-        keys = [(pos, tp) for pos in run_tps for tp in run_tps[pos]]
-        return keys
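
The deleted get_data_lazy builds the 5D (T, C, Z, Y, X) stack out of per-plane delayed reads, so no pixels are pulled from OMERO until a slice is actually computed. A stripped-down sketch of the same dask pattern, with a dummy plane reader standing in for pixels.getPlane():

    import numpy as np
    import dask.array as da
    from dask import delayed

    nt, nc, nz, ny, nx = 3, 2, 5, 64, 64

    @delayed
    def get_plane(zct):
        # Stand-in for an OMERO plane read; returns a blank plane
        return np.zeros((ny, nx), dtype=np.uint16)

    def get_lazy_plane(zct):
        return da.from_delayed(get_plane(zct), shape=(ny, nx), dtype=np.uint16)

    stack = da.stack(
        [da.stack([da.stack([get_lazy_plane((z, c, t)) for z in range(nz)])
                   for c in range(nc)])
         for t in range(nt)]
    )
    print(stack.shape)  # (3, 2, 5, 64, 64), i.e. T, C, Z, Y, X
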
diff --git a/pcore/extract.py b/pcore/extract.py
deleted file mode 100644
index edd7c063..00000000
--- a/pcore/extract.py
+++ /dev/null
@@ -1,279 +0,0 @@
-"""
-A module to extract data from a processed experiment.
-"""
-import h5py
-import numpy as np
-from tqdm import tqdm
-
-from pcore.io.matlab import matObject
-from growth_rate.estimate_gr import estimate_gr
-
-
-class Extracted:
-    # TODO write the filtering functions.
-    def __init__(self):
-        self.volume = None
-        self._keep = None
-
-    def filter(self, filename=None, **kwargs):
-        """
-        1. Filter out small non-growing tracks. This means:
-            a. the cell size never reaches beyond a certain size-threshold
-            volume_thresh or
-            b. the cell's volume doesn't increase by at least a minimum
-            amount over its lifetime
-        2. Join daughter tracks that are contiguous and within a volume
-           threshold of each other
-        3. Discard tracks that are shorter than a threshold number of
-           timepoints
-
-        This function is used to fix tracking/bud errors in post-processing.
-        The parameters define the thresholds used to determine which cells are
-        discarded.
-        FIXME Ideally we get to a point where this is no longer needed.
-        :return:
-        """
-        #self.join_tracks()
-        filter_out = self.filter_size(**kwargs)
-        filter_out += self.filter_lifespan(**kwargs)
-        # TODO save data or just filtering parameters?
-        #self.to_hdf(filename)
-        self.keep = ~filter_out
-
-    def filter_size(self, volume_thresh=7, growth_thresh=10, **kwargs):
-        """Filter out small and non-growing cells.
-        :param volume_thresh: Size threshold for small cells
-        :param growth_thresh: Size difference threshold for non-growing cells
-        """
-        filter_out = np.where(np.max(self.volume, axis=1) < volume_thresh,
-                              True, False)
-        growth = [v[v > 0] for v in self.volume]
-        growth = np.array([v[-1] - v[0] if len(v) > 0 else 0 for v in growth])
-        filter_out += np.where(growth < growth_thresh, True, False)
-        return filter_out
-
-    def filter_lifespan(self, min_time=5, **kwargs):
-        """Remove daughter cells that have a small life span.
-
-        :param min_time: The minimum life span, under which cells are removed.
-        """
-        # TODO What if there are nan values?
-        filter_out = np.where(np.count_nonzero(self.volume, axis=1) <
-                              min_time, True, False)
-        return filter_out
-
-    def join_tracks(self, threshold=7):
-        """ Join contiguous tracks that are within a certain volume
-        threshold of each other.
-
-        :param threshold: Maximum volume difference to join contiguous tracks.
-        :return:
-        """
-        # For all pairs of cells
-        #
-        pass
-
-
-class ExtractedHDF(Extracted):
-    # TODO pull all the data out of the HFile and filter!
-    def __init__(self, file):
-        # We consider the data to be read-only
-        self.hfile = h5py.File(file, 'r')
-
-
-class ExtractedMat(Extracted):
-    """ Pulls the extracted data out of the MATLAB cTimelapse file.
-
-    This is mostly a convenience function in order to run the
-    gaussian-processes growth-rate estimation
-    """
-    def __init__(self, file, debug=False):
-        ct = matObject(file)
-        self.debug = debug
-        # Pre-computed data
-        # TODO what if there is no timelapseTrapsOmero?
-        self.metadata = ct['timelapseTrapsOmero']['metadata']
-        self.extracted_data = ct['timelapseTrapsOmero']['extractedData']
-        self.channels = ct['timelapseTrapsOmero']['extractionParameters'][
-            'functionParameters']['channels'].tolist()
-        self.time_settings = ct['timelapseTrapsOmero']['metadata']['acq'][
-            'times']
-        # Get filtering information
-        n_cells = self.extracted_data['cellNum'][0].shape
-        self.keep = np.full(n_cells, True)
-        # Not yet computed data
-        self._gr_results = None
-        self._growth_rate = None
-        self._daughter_index = None
-
-
-    def get_channel_index(self, channel):
-        """Get index of channel based on name. This only considers
-        fluorescence channels."""
-        return self.channels.index(channel)
-
-    @property
-    def trap_num(self):
-        return self.extracted_data['trapNum'][0][self.keep]
-
-    @property
-    def cell_num(self):
-        return self.extracted_data['cellNum'][0][self.keep]
-
-    def identity(self, cell_idx):
-        """Get the (position), trap, and cell label given a cell's global
-        index."""
-        # Todo include position when using full strain
-        trap = self.trap_num[cell_idx]
-        cell = self.cell_num[cell_idx]
-        return trap, cell
-
-    def global_index(self, trap_id, cell_label):
-        """Get the global index of a cell given it's trap/cellNum
-        combination."""
-        candidates = np.where(np.logical_and(
-                            (self.trap_num == trap_id), # +1?
-                            (self.cell_num == cell_label)
-                        ))[0]
-        # TODO raise error if number of candidates != 1
-        if len(candidates) == 1:
-            return candidates[0]
-        elif len(candidates) == 0:
-            return -1
-        else:
-            raise(IndexError("No such cell/trap combination"))
-
-    @property
-    def daughter_label(self):
-        """Returns the cell label of the daughters of each cell over the
-        timelapse.
-
-        0 corresponds to no daughter. This is *not* the index of the daughter
-        cell within the data. To get this, use daughter_index.
-        """
-        return self.extracted_data['daughterLabel'][0][self.keep]
-
-    def _single_daughter_idx(self, mother_idx, daughter_labels):
-        trap_id, _ = self.identity(mother_idx)
-        daughter_index = [self.global_index(trap_id, cell_label) for
-                          cell_label
-                          in daughter_labels]
-        return daughter_index
-
-    @property
-    def daughter_index(self):
-        """Returns the global index of the daughters of each cell.
-
-        This is different from the daughter label because it corresponds to
-        the index of the daughter when counting all of the cells. This can
-        be used to index within the data arrays.
-        """
-        if self._daughter_index is None:
-            daughter_index = [self._single_daughter_idx(i, daughter_labels)
-                          for i, daughter_labels in enumerate(
-                                  self.daughter_label)]
-            self._daughter_index = np.array(daughter_index)
-        return self._daughter_index
-
-    @property
-    def births(self):
-        return np.array(self.extracted_data['births'][0].todense())[self.keep]
-
-    @property
-    def volume(self):
-        """Get the volume of all of the cells"""
-        return np.array(self.extracted_data['volume'][0].todense())[self.keep]
-
-    def _gr_estimation(self):
-        dt = self.time_settings['interval'] / 360  # s to h conversion
-        results = []
-        for v in tqdm(self.volume):
-            results.append(estimate_gr(v, dt))
-        merged = {k: np.stack([x[k] for x in results]) for k in results[0]}
-        self._gr_results = merged
-        return
-
-    @property
-    def growth_rate(self):
-        """Get the growth rate for all cells.
-
-        Note that this uses the Gaussian-process method of estimating
-        growth rate by default. If there is no growth rate in the given file
-        (usually the case for MATLAB), it needs to run estimation first.
-        This can take a while.
-        """
-        # TODO cache the results of growth rate estimation.
-        if self._gr_results is None:
-            self._gr_estimation()
-        return self._gr_results['growth_rate']
-
-    def _fluo_attribute(self, channel, attribute):
-        channel_id = self.get_channel_index(channel)
-        res = np.array(self.extracted_data[attribute][channel_id].todense())
-        return res[self.keep]
-
-    def protein_localisation(self, channel, method='nucEstConv'):
-        """Returns protein localisation data for a given channel.
-
-        Uses the 'nucEstConv' by default. Alternatives are 'smallPeakConv',
-        'max5px', 'max2p5pc'
-        """
-        return self._fluo_attribute(channel, method)
-
-    def background_fluo(self, channel):
-        return self._fluo_attribute(channel, 'imBackground')
-
-    def mean(self, channel):
-        return self._fluo_attribute(channel, 'mean')
-
-    def median(self, channel):
-        return self._fluo_attribute(channel, 'median')
-
-    def filter(self, filename=None):
-        """Filters and saves results to and HDF5 file.
-
-        This is necessary because we cannot write to the MATLAB file,
-        so the results of the filter cannot be saved in the object.
-        """
-        super().filter(filename=filename)
-        self._growth_rate = None  # reset growth rate so it is recomputed
-
-    def to_hdf(self, filename):
-        """Store the current results, including any filtering done, to a file.
-
-        TODO Should we save filtered results or just re-do?
-        :param filename:
-        :return:
-        """
-        store = h5py.File(filename, 'w')
-        try:
-            # Store (some of the) metadata
-            for meta in ['experiment', 'username', 'microscope',
-                              'comments', 'project', 'date', 'posname',
-                              'exptid']:
-                store.attrs[meta] = self.metadata[meta]
-            # TODO store timing information?
-            store.attrs['time_interval'] = self.time_settings['interval']
-            store.attrs['timepoints'] = self.time_settings['ntimepoints']
-            store.attrs['total_duration'] = self.time_settings['totalduration']
-            # Store volume, births, daughterLabel, trapNum, cellNum
-            for key in ['volume', 'births', 'daughter_label', 'trap_num',
-                        'cell_num']:
-                store[key] = getattr(self, key)
-            # Store growth rate results
-            if self._gr_results:
-                grp = store.create_group('gaussian_process')
-                for key, val in self._gr_results.items():
-                    grp[key] = val
-            for channel in self.channels:
-                # Create a group for each channel
-                grp = store.create_group(channel)
-                # Store protein_localisation, background fluorescence, mean, median
-                # for each channel
-                grp['protein_localisation'] = self.protein_localisation(channel)
-                grp['background_fluo'] = self.background_fluo(channel)
-                grp['mean'] = self.mean(channel)
-                grp['median'] = self.median(channel)
-        finally:
-            store.close()
-
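
The filtering rules described in Extracted.filter keep a cell only if it both reaches volume_thresh at some point and grows by at least growth_thresh over its lifetime (zeros are treated as missing observations). A toy illustration of the filter_size logic on made-up volumes, assuming plain NumPy arrays:

    import numpy as np

    volume = np.array([
        [5.0, 6.0, 6.5],     # never reaches volume_thresh -> filtered out
        [8.0, 9.0, 20.0],    # large and growing -> kept
        [10.0, 10.5, 11.0],  # large but grows < growth_thresh -> filtered out
    ])
    volume_thresh, growth_thresh = 7, 10

    too_small = np.max(volume, axis=1) < volume_thresh
    growth = np.array([v[v > 0][-1] - v[v > 0][0] if (v > 0).any() else 0 for v in volume])
    non_growing = growth < growth_thresh
    keep = ~(too_small | non_growing)
    print(keep)  # [False  True False]
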
diff --git a/pcore/grouper.py b/pcore/grouper.py
deleted file mode 100644
index 9f61fc6f..00000000
--- a/pcore/grouper.py
+++ /dev/null
@@ -1,175 +0,0 @@
-#!/usr/bin/env python3
-
-from abc import ABC, abstractmethod, abstractproperty
-from pathlib import Path
-from pathos.multiprocessing import Pool
-
-import h5py
-import numpy as np
-import pandas as pd
-
-from pcore.io.signal import Signal
-
-
-class Grouper(ABC):
-    """
-    Base grouper class
-    """
-
-    files = []
-
-    def __init__(self, dir):
-        self.files = list(Path(dir).glob("*.h5"))
-        self.load_signals()
-
-    def load_signals(self):
-        self.signals = {f.name[:-3]: Signal(f) for f in self.files}
-
-    @property
-    def fsignal(self):
-        return list(self.signals.values())[0]
-
-    @property
-    def siglist(self):
-        return self.fsignal.datasets
-
-    @abstractproperty
-    def group_names(self):
-        pass
-
-    def concat_signal(self, path, reduce_cols=None, axis=0, pool=8):
-        group_names = self.group_names
-        sitems = self.signals.items()
-        if pool:
-            with Pool(pool) as p:
-                signals = p.map(
-                    lambda x: concat_signal_ind(path, group_names, x[0], x[1]),
-                    sitems,
-                )
-        else:
-            signals = [
-                concat_signal_ind(path, group_names, name, signal)
-                for name, signal in sitems
-            ]
-
-        signals = [s for s in signals if s is not None]
-        sorted = pd.concat(signals, axis=axis).sort_index()
-        if reduce_cols:
-            sorted = sorted.apply(np.nanmean, axis=1)
-            spath = path.split("/")
-            sorted.name = "_".join([spath[1], spath[-1]])
-
-        return sorted
-
-    @property
-    def ntraps(self):
-        for pos, s in self.signals.items():
-            with h5py.File(s.filename, "r") as f:
-                print(pos, f["/trap_info/trap_locations"].shape[0])
-
-    def traplocs(self):
-        d = {}
-        for pos, s in self.signals.items():
-            with h5py.File(s.filename, "r") as f:
-                d[pos] = f["/trap_info/trap_locations"][()]
-        return d
-
-
-class MetaGrouper(Grouper):
-    """Group positions using metadata's 'group' number"""
-
-    pass
-
-
-class NameGrouper(Grouper):
-    """
-    Group a set of positions using a subsection of the name
-    """
-
-    def __init__(self, dir, by=None):
-        super().__init__(dir=dir)
-
-        if by is None:
-            by = (0, -4)
-        self.by = by
-
-    @property
-    def group_names(self):
-        if not hasattr(self, "_group_names"):
-            self._group_names = {}
-            for name in self.signals.keys():
-                self._group_names[name] = name[self.by[0] : self.by[1]]
-
-        return self._group_names
-
-    def aggregate_multisignals(self, paths=None, **kwargs):
-
-        aggregated = pd.concat(
-            [
-                self.concat_signal(path, reduce_cols=np.nanmean, **kwargs)
-                for path in paths
-            ],
-            axis=1,
-        )
-        # ph = pd.Series(
-        #     [
-        #         self.ph_from_group(x[list(aggregated.index.names).index("group")])
-        #         for x in aggregated.index
-        #     ],
-        #     index=aggregated.index,
-        #     name="media_pH",
-        # )
-        # self.aggregated = pd.concat((aggregated, ph), axis=1)
-
-        return aggregated
-
-
-class phGrouper(NameGrouper):
-    """
-    Grouper for pH calibration experiments where all surveyed media pH values
-    are within a single experiment.
-    """
-
-    def __init__(self, dir, by=(3, 7)):
-        super().__init__(dir=dir, by=by)
-
-    def get_ph(self):
-        self.ph = {gn: self.ph_from_group(gn) for gn in self.group_names}
-
-    @staticmethod
-    def ph_from_group(group_name):
-        if group_name.startswith("ph_"):
-            group_name = group_name[3:]
-
-        return float(group_name.replace("_", "."))
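-
-    # Illustrative examples, assuming group names encode the medium pH with
-    # underscores in place of decimal points (an assumption about the naming
-    # convention): ph_from_group("ph_5_5") -> 5.5, ph_from_group("7_0") -> 7.0.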
-
-    def aggregate_multisignals(self, paths):
-
-        aggregated = pd.concat(
-            [self.concat_signal(path, reduce_cols=np.nanmean) for path in paths], axis=1
-        )
-        ph = pd.Series(
-            [
-                self.ph_from_group(x[list(aggregated.index.names).index("group")])
-                for x in aggregated.index
-            ],
-            index=aggregated.index,
-            name="media_pH",
-        )
-        aggregated = pd.concat((aggregated, ph), axis=1)
-
-        return aggregated
-
-
-def concat_signal_ind(path, group_names, group, signal):
-    print("Looking at ", group)
-    try:
-        combined = signal[path]
-        combined["position"] = group
-        combined["group"] = group_names[group]
-        combined.set_index(["group", "position"], inplace=True, append=True)
-        combined.index = combined.index.swaplevel(-2, 0).swaplevel(-1, 1)
-
-        return combined
-    except:
-        return None
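-
-
-# Minimal usage sketch. The directory and signal path below are hypothetical
-# and depend on how the experiment was exported:
-#
-#     grouper = NameGrouper("/data/experiment_dir", by=(0, -4))
-#     volumes = grouper.concat_signal(
-#         "extraction/general/None/volume", pool=None
-#     )
-#
-# With by=(0, -4), a position named "ph_5_5_001" is assigned to group
-# "ph_5_5", and concat_signal returns the per-position DataFrames stacked
-# with (group, position) prepended to the index.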
diff --git a/pcore/haystack.py b/pcore/haystack.py
deleted file mode 100644
index 5ca8118c..00000000
--- a/pcore/haystack.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import numpy as np
-from time import perf_counter
-from pathlib import Path
-
-import tensorflow as tf
-
-from pcore.io.writer import DynamicWriter
-
-
-def initialise_tf(version):
-    # Initialise tensorflow
-    if version == 1:
-        core_config = tf.ConfigProto()
-        core_config.gpu_options.allow_growth = True
-        session = tf.Session(config=core_config)
-        return session
-    # TODO this only works for TF2
-    if version == 2:
-        gpus = tf.config.experimental.list_physical_devices("GPU")
-        if gpus:
-            for gpu in gpus:
-                tf.config.experimental.set_memory_growth(gpu, True)
-            logical_gpus = tf.config.experimental.list_logical_devices("GPU")
-            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
-        return None
-
-
-def timer(func, *args, **kwargs):
-    start = perf_counter()
-    result = func(*args, **kwargs)
-    print(f"Function {func.__name__}: {perf_counter() - start}s")
-    return result
-
-
-################## CUSTOM OBJECTS ##################################
-
-
-class ModelPredictor:
-    """Generic object that takes a NN and returns the prediction.
-
-    Use for predicting fluorescence/other from bright field.
-    It does not perform instance segmentation.
-    """
-
-    def __init__(self, tiler, model, name, bf_channel=0):
-        self.tiler = tiler
-        self.model = model
-        self.name = name
-        # Channel used by get_data below; the default of 0 is an assumption.
-        self.bf_channel = bf_channel
-
-    def get_data(self, tp):
-        # Change axes to X,Y,Z rather than Z,Y,X
-        return self.tiler.get_tp_data(tp, self.bf_channel).swapaxes(1, 3).swapaxes(1, 2)
-
-    def format_result(self, result, tp):
-        return {self.name: result, "timepoints": [tp] * len(result)}
-
-    def run_tp(self, tp, **kwargs):
-        """Predict on the tiles of time point `tp` and format the result."""
-        # Access the image
-        segmentation = self.model.predict(self.get_data(tp))
-        return self.format_result(segmentation, tp)
-
-
-class ModelPredictorWriter(DynamicWriter):
-    def __init__(self, file, name, shape, dtype):
-        super().__init__(file)
-        self.name = name
-        self.datatypes = {name: (shape, dtype), "timepoint": ((None,), np.uint16)}
-        self.group = f"{self.name}_info"
-
-
-class Saver:
-    channel_names = {0: "BrightField", 1: "GFP"}
-
-    def __init__(self, tiler, save_directory, pos_name):
-        """This class straight up saves the trap data for use with neural networks in the future."""
-        self.tiler = tiler
-        self.name = pos_name
-        self.save_dir = Path(save_directory)
-
-    def channel_dir(self, index):
-        ch_dir = self.save_dir / self.channel_names[index]
-        if not ch_dir.exists():
-            ch_dir.mkdir()
-        return ch_dir
-
-    def get_data(self, tp, ch):
-        return self.tiler.get_tp_data(tp, ch).swapaxes(1, 3).swapaxes(1, 2)
-
-    def cache(self, tp):
-        # Get a given time point
-        # split into channels
-        for ch in self.channel_names:
-            ch_dir = self.channel_dir(ch)
-            data = self.get_data(tp, ch)
-            for tid, trap in enumerate(data):
-                np.save(ch_dir / f"{self.name}_{tid}_{tp}.npy", trap)
-        return
diff --git a/pcore/io/__init__.py b/pcore/io/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/pcore/io/base.py b/pcore/io/base.py
deleted file mode 100644
index a9b91173..00000000
--- a/pcore/io/base.py
+++ /dev/null
@@ -1,142 +0,0 @@
-from typing import Union
-import collections.abc
-from itertools import groupby, chain, product
-
-import numpy as np
-import h5py
-
-
-class BridgeH5:
-    """
-    Base class to interact with h5 data stores.
-    It also contains functions useful for predicting how long segmentation should take.
-    """
-
-    def __init__(self, filename, flag="r"):
-        self.filename = filename
-        if flag is not None:
-            self._hdf = h5py.File(filename, flag)
-
-            self._filecheck()
-
-    def _filecheck(self):
-        assert "cell_info" in self._hdf, "Invalid file. No 'cell_info' found."
-
-    def close(self):
-        self._hdf.close()
-
-    def max_ncellpairs(self, nstepsback):
-        """
-        Get maximum number of cell pairs to be calculated
-        """
-
-        dset = self._hdf["cell_info"][()]
-        # attrs = self._hdf[dataset].attrs
-        pass
-
-    @property
-    def cell_tree(self):
-        return self.get_info_tree()
-
-    def get_n_cellpairs(self, nstepsback=2):
-        cell_tree = self.cell_tree
-        # get pair of consecutive trap-time points
-        pass
-
-    @staticmethod
-    def get_consecutives(tree, nstepsback):
-        # Receives a sorted tree and returns the keys of consecutive elements
-        vals = {k: np.array(list(v)) for k, v in tree.items()}  # get tp level
-        where_consec = [
-            {
-                k: np.where(np.subtract(v[n + 1 :], v[: -n - 1]) == n + 1)[0]
-                for k, v in vals.items()
-            }
-            for n in range(nstepsback)
-        ]  # get indices of consecutive elements
-        return where_consec
-
-    def get_npairs(self, nstepsback=2, tree=None):
-        if tree is None:
-            tree = self.cell_tree
-
-        consecutive = self.get_consecutives(tree, nstepsback=nstepsback)
-        flat_tree = flatten(tree)
-
-        n_predictions = 0
-        for i, d in enumerate(consecutive, 1):
-            flat = list(chain(*[product([k], list(v)) for k, v in d.items()]))
-            pairs = [(f, (f[0], f[1] + i)) for f in flat]
-            for p in pairs:
-                n_predictions += len(flat_tree.get(p[0], [])) * len(
-                    flat_tree.get(p[1], [])
-                )
-
-        return n_predictions
-
-    def get_npairs_over_time(self, nstepsback=2):
-        tree = self.cell_tree
-        npairs = []
-        for t in self._hdf["cell_info"]["processed_timepoints"][()]:
-            tmp_tree = {
-                k: {k2: v2 for k2, v2 in v.items() if k2 <= t} for k, v in tree.items()
-            }
-            npairs.append(self.get_npairs(tree=tmp_tree))
-
-        return np.diff(npairs)
-
-    def get_info_tree(
-        self, fields: Union[tuple, list] = ("trap", "timepoint", "cell_label")
-    ):
-        """
-        Returns traps, time points and labels for this position in form of a tree
-        in the hierarchy determined by the argument fields. Note that it is
-        compressed to non-empty elements and timepoints.
-
-        Default hierarchy is:
-        - trap
-         - time point
-          - cell label
-
-        This function currently produces trees of depth 3, but it can easily be
-        extended for deeper trees if needed (e.g. considering groups,
-        chambers and/or positions).
-
-        input
-        :fields: Fields to fetch from 'cell_info' inside the hdf5 storage
-
-        returns
-        :tree: Nested dictionary where keys (or branches) are the upper levels
-             and the leaves are the last element of :fields:.
-        """
-        zipped_info = (*zip(*[self._hdf["cell_info"][f][()] for f in fields]),)
-
-        return recursive_groupsort(zipped_info)
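-
-    # Example of the returned structure with the default fields
-    # ("trap", "timepoint", "cell_label"); the values are hypothetical:
-    #
-    #     {0: {1: [1, 2], 2: [1, 2, 3]},
-    #      1: {3: [1]}}
-    #
-    # i.e. trap 0 holds cells 1-2 at time point 1 and cells 1-3 at time
-    # point 2; traps and time points without cells do not appear.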
-
-
-def groupsort(iterable: Union[tuple, list]):
-    # Sorts iterable and returns a dictionary where the values are grouped by the first element.
-
-    iterable = sorted(iterable, key=lambda x: x[0])
-    grouped = {k: [x[1:] for x in v] for k, v in groupby(iterable, lambda x: x[0])}
-    return grouped
-
-
-def recursive_groupsort(iterable):
-    # Recursive extension of groupsort
-    if len(iterable[0]) > 1:
-        return {k: recursive_groupsort(v) for k, v in groupsort(iterable).items()}
-    else:  # Tuples have a single element left: return the leaf values
-        return [x[0] for x in iterable]
-
-
-def flatten(d, parent_key="", sep="_"):
-    """Flatten nested dict. Adapted from https://stackoverflow.com/a/6027615"""
-    items = []
-    for k, v in d.items():
-        new_key = parent_key + (k,) if parent_key else (k,)
-        if isinstance(v, collections.abc.MutableMapping):
-            items.extend(flatten(v, new_key, sep=sep).items())
-        else:
-            items.append((new_key, v))
-    return dict(items)
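-
-
-# Illustrative behaviour of the helpers above (hypothetical data):
-#
-#     groupsort([(0, 1, "a"), (0, 2, "b"), (1, 1, "c")])
-#     # -> {0: [(1, "a"), (2, "b")], 1: [(1, "c")]}
-#
-#     flatten({0: {1: ["a"]}, 2: {3: ["b"]}})
-#     # -> {(0, 1): ["a"], (2, 3): ["b"]}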
diff --git a/pcore/io/matlab.py b/pcore/io/matlab.py
deleted file mode 100644
index a4ed5984..00000000
--- a/pcore/io/matlab.py
+++ /dev/null
@@ -1,569 +0,0 @@
-"""Read and convert MATLAB files from Swain Lab platform.
-
-TODO: Information that I need from lab members esp J and A
-    * Lots of examples to try
-    * Any ideas on what these Map objects are?
-
-TODO: Update Swain Lab wiki
-
-All credit to Matt Bauman for
-the reverse engineering at https://nbviewer.jupyter.org/gist/mbauman/9121961
-"""
-
-import re
-import struct
-import sys
-from collections.abc import Iterable
-from io import BytesIO
-
-import h5py
-import numpy as np
-import pandas as pd
-import scipy
-from numpy.compat import asstr
-
-# TODO only use this if scipy>=1.6 or so
-from scipy.io import matlab
-from scipy.io.matlab.mio5 import MatFile5Reader
-from scipy.io.matlab.mio5_params import mat_struct
-
-from pcore.io.utils import read_int, read_string, read_delim
-
-
-def read_minimat_vars(rdr):
-    rdr.initialize_read()
-    mdict = {"__globals__": []}
-    i = 0
-    while not rdr.end_of_stream():
-        hdr, next_position = rdr.read_var_header()
-        name = asstr(hdr.name)
-        if name == "":
-            name = "var_%d" % i
-            i += 1
-        res = rdr.read_var_array(hdr, process=False)
-        rdr.mat_stream.seek(next_position)
-        mdict[name] = res
-        if hdr.is_global:
-            mdict["__globals__"].append(name)
-    return mdict
-
-
-def read_workspace_vars(fname):
-    fp = open(fname, "rb")
-    rdr = MatFile5Reader(fp, struct_as_record=True, squeeze_me=True)
-    vars = rdr.get_variables()
-    fws = vars["__function_workspace__"]
-    ws_bs = BytesIO(fws.tostring())
-    ws_bs.seek(2)
-    rdr.mat_stream = ws_bs
-    # Guess byte order.
-    mi = rdr.mat_stream.read(2)
-    rdr.byte_order = "<" if mi == b"IM" else ">"
-    rdr.mat_stream.read(4)  # presumably byte padding
-    mdict = read_minimat_vars(rdr)
-    fp.close()
-    return mdict
-
-
-class matObject:
-    """A python read-out of MATLAB objects
-    The objects pulled out of the
-    """
-
-    def __init__(self, filepath):
-        self.filepath = filepath  # For record
-        self.classname = None
-        self.object_name = None
-        self.buffer = None
-        self.version = None
-        self.names = None
-        self.segments = None
-        self.heap = None
-        self.attrs = dict()
-        self._init_buffer()
-        self._init_heap()
-        self._read_header()
-        self.parse_file()
-
-    def __getitem__(self, item):
-        return self.attrs[item]
-
-    def keys(self):
-        """Returns the names of the available properties"""
-        return self.attrs.keys()
-
-    def get(self, item, default=None):
-        return self.attrs.get(item, default)
-
-    def _init_buffer(self):
-        fp = open(self.filepath, "rb")
-        rdr = MatFile5Reader(fp, struct_as_record=True, squeeze_me=True)
-        vars = rdr.get_variables()
-        self.classname = vars["None"]["s2"][0].decode("utf-8")
-        self.object_name = vars["None"]["s0"][0].decode("utf-8")
-        fws = vars["__function_workspace__"]
-        self.buffer = BytesIO(fws.tostring())
-        fp.close()
-
-    def _init_heap(self):
-        super_data = read_workspace_vars(self.filepath)
-        elem = super_data["var_0"][0, 0]
-        if isinstance(elem, mat_struct):
-            self.heap = elem.MCOS[0]["arr"]
-        else:
-            self.heap = elem["MCOS"][0]["arr"]
-
-    def _read_header(self):
-        self.buffer.seek(248)  # the start of the header
-        version = read_int(self.buffer)
-        n_str = read_int(self.buffer)
-
-        offsets = read_int(self.buffer, n=6)
-
-        # check that the next two are zeros
-        reserved = read_int(self.buffer, n=2)
-        assert all(
-            [x == 0 for x in reserved]
-        ), "Non-zero reserved header fields: {}".format(reserved)
-        # check that we are at the right place
-        assert self.buffer.tell() == 288, "String elements begin at 288"
-        hdrs = []
-        for i in range(n_str):
-            hdrs.append(read_string(self.buffer))
-        self.names = hdrs
-        self.version = version
-        # The offsets are actually STARTING FROM 248 as well
-        self.segments = [x + 248 for x in offsets]  # list(offsets)
-        return
-
-    def parse_file(self):
-        # Get class attributes from segment 1
-        self.buffer.seek(self.segments[0])
-        classes = self._parse_class_attributes(self.segments[1])
-        # Get first set of properties from segment 2
-        self.buffer.seek(self.segments[1])
-        props1 = self._parse_properties(self.segments[2])
-        # Get the property description from segment 3
-        self.buffer.seek(self.segments[2])
-        object_info = self._parse_prop_description(classes, self.segments[3])
-        # Get more properties from segment 4
-        self.buffer.seek(self.segments[3])
-        props2 = self._parse_properties(self.segments[4])
-        # Check that the last segment is empty
-        self.buffer.seek(self.segments[4])
-        seg5_length = (self.segments[5] - self.segments[4]) // 8
-        read_delim(self.buffer, seg5_length)
-        props = (props1, props2)
-        self._to_attrs(object_info, props)
-
-    def _to_attrs(self, object_info, props):
-        """Re-organise the various classes and subclasses into a nested
-        dictionary.
-        :return:
-        """
-        for pkg_clss, indices, idx in object_info:
-            pkg, clss = pkg_clss
-            idx = max(indices)
-            which = indices.index(idx)
-            obj = flatten_obj(props[which][idx])
-            subdict = self.attrs
-            if pkg != "":
-                subdict = self.attrs.setdefault(pkg, {})
-            if clss in subdict:
-                if isinstance(subdict[clss], list):
-                    subdict[clss].append(obj)
-                else:
-                    subdict[clss] = [subdict[clss]]
-                    subdict[clss].append(obj)
-            else:
-                subdict[clss] = obj
-
-    def describe(self):
-        describe(self.attrs)
-
-    def _parse_class_attributes(self, section_end):
-        """Read the Class attributes = the first segment"""
-        read_delim(self.buffer, 4)
-        classes = []
-        while self.buffer.tell() < section_end:
-            package_index = read_int(self.buffer) - 1
-            package = self.names[package_index] if package_index > 0 else ""
-            name_idx = read_int(self.buffer) - 1
-            name = self.names[name_idx] if name_idx > 0 else ""
-            classes.append((package, name))
-            read_delim(self.buffer, 2)
-        return classes
-
-    def _parse_prop_description(self, classes, section_end):
-        """Parse the description of each property = the third segment"""
-        read_delim(self.buffer, 6)
-        object_info = []
-        while self.buffer.tell() < section_end:
-            class_idx = read_int(self.buffer) - 1
-            class_type = classes[class_idx]
-            read_delim(self.buffer, 2)
-            indices = [x - 1 for x in read_int(self.buffer, 2)]
-            obj_id = read_int(self.buffer)
-            object_info.append((class_type, indices, obj_id))
-        return object_info
-
-    def _parse_properties(self, section_end):
-        """
-        Parse the actual values of the attributes == segments 2 and 4
-        """
-        read_delim(self.buffer, 2)
-        props = []
-        while self.buffer.tell() < section_end:
-            n_props = read_int(self.buffer)
-            d = parse_prop(n_props, self.buffer, self.names, self.heap)
-            if not d:  # Empty dictionary
-                break
-            props.append(d)
-            # Move to next 8-byte aligned offset
-            self.buffer.seek(self.buffer.tell() + self.buffer.tell() % 8)
-        return props
-
-    def to_hdf(self, filename):
-        f = h5py.File(filename, mode="w")
-        save_to_hdf(f, "/", self.attrs)
-
-
-def describe(d, indent=0, width=4, out=None):
-    for key, value in d.items():
-        print(f'{"": <{width * indent}}' + str(key), file=out)
-        if isinstance(value, dict):
-            describe(value, indent + 1, out=out)
-        elif isinstance(value, np.ndarray):
-            print(
-                f'{"": <{width * (indent + 1)}} {value.shape} array '
-                f"of type {value.dtype}",
-                file=out,
-            )
-        elif isinstance(value, scipy.sparse.csc.csc_matrix):
-            print(
-                f'{"": <{width * (indent + 1)}} {value.shape} '
-                f"sparse matrix of type {value.dtype}",
-                file=out,
-            )
-        elif isinstance(value, Iterable) and not isinstance(value, str):
-            print(
-                f'{"": <{width * (indent + 1)}} {type(value)} of len ' f"{len(value)}",
-                file=out,
-            )
-        else:
-            print(f'{"": <{width * (indent + 1)}} {value}', file=out)
-
-
-def parse_prop(n_props, buff, names, heap):
-    d = dict()
-    for i in range(n_props):
-        name_idx, flag, heap_idx = read_int(buff, 3)
-        if flag not in [0, 1, 2] and name_idx == 0:
-            n_props = flag
-            buff.seek(buff.tell() - 1)  # go back on one byte
-            d = parse_prop(n_props, buff, names, heap)
-        else:
-            item_name = names[name_idx - 1]
-            if flag == 0:
-                d[item_name] = names[heap_idx]
-            elif flag == 1:
-                d[item_name] = heap[heap_idx + 2]  # Todo: what is the heap?
-            elif flag == 2:
-                assert 0 <= heap_idx <= 1, (
-                    "Boolean flag has a value other " "than 0 or 1 "
-                )
-                d[item_name] = bool(heap_idx)
-            else:
-                raise ValueError(
-                    "unknown flag {} for property {} with heap "
-                    "index {}".format(flag, item_name, heap_idx)
-                )
-    return d
-
-
-def is_object(x):
-    """Checking object dtype for structured numpy arrays"""
-    if x.dtype.names is not None and len(x.dtype.names) > 1:  # Complex obj
-        return all(x.dtype[ix] == np.object for ix in range(len(x.dtype)))
-    else:  # simple object
-        return x.dtype == np.object
-
-
-def flatten_obj(arr):
-    # TODO turn structured arrays into nested dicts of lists rather that
-    #  lists of dicts
-    if isinstance(arr, np.ndarray):
-        if arr.dtype.names:
-            arrdict = dict()
-            for fieldname in arr.dtype.names:
-                arrdict[fieldname] = flatten_obj(arr[fieldname])
-            arr = arrdict
-        elif arr.dtype == np.object and arr.ndim == 0:
-            arr = flatten_obj(arr[()])
-        elif arr.dtype == np.object and arr.ndim > 0:
-            try:
-                arr = np.stack(arr)
-                if arr.dtype.names:
-                    d = {k: flatten_obj(arr[k]) for k in arr.dtype.names}
-                    arr = d
-            except:
-                arr = [flatten_obj(x) for x in arr.tolist()]
-    elif isinstance(arr, dict):
-        arr = {k: flatten_obj(v) for k, v in arr.items()}
-    elif isinstance(arr, list):
-        try:
-            arr = flatten_obj(np.stack(arr))
-        except:
-            arr = [flatten_obj(x) for x in arr]
-    return arr
-
-
-def save_to_hdf(h5file, path, dic):
-    """
-    Saving a MATLAB object to HDF5
-    """
-    if isinstance(dic, list):
-        dic = {str(i): v for i, v in enumerate(dic)}
-    for key, item in dic.items():
-        if isinstance(item, (int, float, str)):
-            h5file[path].attrs.create(key, item)
-        elif isinstance(item, list):
-            if len(item) == 0 and path + key not in h5file:  # empty list empty group
-                h5file.create_group(path + key)
-            if all(isinstance(x, (int, float, str)) for x in item):
-                if path not in h5file:
-                    h5file.create_group(path)
-                h5file[path].attrs.create(key, item)
-            else:
-                if path + key not in h5file:
-                    h5file.create_group(path + key)
-                save_to_hdf(
-                    h5file, path + key + "/", {str(i): x for i, x in enumerate(item)}
-                )
-        elif isinstance(item, scipy.sparse.csc.csc_matrix):
-            try:
-                h5file.create_dataset(
-                    path + key, data=item.todense(), compression="gzip"
-                )
-            except Exception as e:
-                print(path + key)
-                raise e
-        elif isinstance(item, (np.ndarray, np.int64, np.float64)):
-            if item.dtype == np.dtype("<U1"):  # Strings to 'S' type for HDF5
-                item = item.astype("S")
-            try:
-                h5file.create_dataset(path + key, data=item, compression="gzip")
-            except Exception as e:
-                print(path + key)
-                raise e
-        elif isinstance(item, dict):
-            if path + key not in h5file:
-                h5file.create_group(path + key)
-            save_to_hdf(h5file, path + key + "/", item)
-        elif item is None:
-            continue
-        else:
-            raise ValueError(f"Cannot save {type(item)} type at key {path + key}")
-
-
-## NOT YET FULLY IMPLEMENTED!
-
-
-class _Info:
-    def __init__(self, info):
-        self.info = info
-        self._identity = None
-
-    def __getitem__(self, item):
-        val = self.info[item]
-        if val.shape[0] == 1:
-            val = val[0]
-        if 0 in val[1].shape:
-            val = val[0]
-        if isinstance(val, scipy.sparse.csc.csc_matrix):
-            return np.asarray(val.todense())
-        if val.dtype == np.dtype("O"):
-            # 3d "sparse matrix"
-            if all(isinstance(x, scipy.sparse.csc.csc_matrix) for x in val):
-                val = np.array([x.todense() for x in val])
-            # TODO: The actual object data
-        equality = val[0] == val[1]
-        if isinstance(equality, scipy.sparse.csc.csc_matrix):
-            equality = equality.todense()
-        if equality.all():
-            val = val[0]
-        return np.squeeze(val)
-
-    @property
-    def categories(self):
-        return self.info.dtype.names
-
-
-class TrapInfo(_Info):
-    def __init__(self, info):
-        """
-        The information on all of the traps in a given position.
-
-        :param info: The TrapInfo structure, can be found in the heap of
-        the CTimelapse at index 7
-        """
-        super().__init__(info)
-
-
-class CellInfo(_Info):
-    def __init__(self, info):
-        """
-        The extracted information of all cells in a given position.
-        :param info: The CellInfo structure, can be found in the heap
-        of the CTimelapse at index 15.
-        """
-        super().__init__(info)
-
-    @property
-    def identity(self):
-        if self._identity is None:
-            self._identity = pd.DataFrame(
-                zip(self["trapNum"], self["cellNum"]), columns=["trapNum", "cellNum"]
-            )
-        return self._identity
-
-    def index(self, trapNum, cellNum):
-        query = "trapNum=={} and cellNum=={}".format(trapNum, cellNum)
-        try:
-            result = self.identity.query(query).index[0]
-        except Exception as e:
-            print(query)
-            raise e
-        return result
-
-    @property
-    def nucEstConv1(self):
-        return np.asarray(self.info["nuc_est_conv"][0][0].todense())
-
-    @property
-    def nucEstConv2(self):
-        return np.asarray(self.info["nuc_est_conv"][0][1].todense())
-
-    @property
-    def mothers(self):
-        return np.where((self["births"] != 0).any(axis=1))[0]
-
-    def daughters(self, mother_index):
-        """
-        Get daughters of cell with index `mother_index`.
-
-        :param mother_index: the index of the mother within the data. This is
-        different from the mother's cell/trap identity.
-        """
-        daughter_ids = np.unique(self["daughterLabel"][mother_index]).tolist()
-        daughter_ids.remove(0)
-        mother_trap = self.identity["trapNum"].loc[mother_index]
-        daughters = [self.index(mother_trap, cellNum) for cellNum in daughter_ids]
-        return daughters
-
-
-def _todict(matobj):
-    """
-    A recursive function that constructs nested dictionaries from mat objects.
-    """
-    if not hasattr(matobj, "_fieldnames"):
-        return matobj
-    d = {}
-    for strg in matobj._fieldnames:
-        elem = matobj.__dict__[strg]
-        if isinstance(elem, matlab.mio5_params.mat_struct):
-            d[strg] = _todict(elem)
-        elif isinstance(elem, np.ndarray):
-            d[strg] = _toarray(elem)
-        else:
-            d[strg] = elem
-    return d
-
-
-def _toarray(ndarray):
-    """
-    A recursive function which constructs ndarray from cellarrays
-    (which are loaded as numpy ndarrays), recursing into the elements
-    if they contain matobjects.
-    """
-    if ndarray.dtype != "float64":
-        elem_list = []
-        for sub_elem in ndarray:
-            if isinstance(sub_elem, matlab.mio5_params.mat_struct):
-                elem_list.append(_todict(sub_elem))
-            elif isinstance(sub_elem, np.ndarray):
-                elem_list.append(_toarray(sub_elem))
-            else:
-                elem_list.append(sub_elem)
-        return np.array(elem_list)
-    else:
-        return ndarray
-
-
-from pathlib import Path
-
-
-class Strain:
-    """The cell info for all the positions of a strain."""
-
-    def __init__(self, origin, strain):
-        self.origin = Path(origin)
-        self.files = [x for x in self.origin.iterdir() if strain in str(x)]
-        self.cts = [matObject(x) for x in self.files]
-        self.cinfos = [CellInfo(x.heap[15]) for x in self.cts]
-        self._identity = None
-
-    def __getitem__(self, item):
-        try:
-            return np.concatenate([c[item] for c in self.cinfos])
-        except ValueError:  # If first axis is the channel
-            return np.concatenate([c[item] for c in self.cinfos], axis=1)
-
-    @property
-    def categories(self):
-        return set.union(*[set(c.categories) for c in self.cinfos])
-
-    @property
-    def identity(self):
-        if self._identity is None:
-            identities = []
-            for pos_id, cinfo in enumerate(self.cinfos):
-                identity = cinfo.identity
-                identity["position"] = pos_id
-                identities.append(identity)
-            self._identity = pd.concat(identities, ignore_index=True)
-        return self._identity
-
-    def index(self, posNum, trapNum, cellNum):
-        query = "position=={} and trapNum=={} and cellNum=={}".format(
-            posNum, trapNum, cellNum
-        )
-        try:
-            result = self.identity.query(query).index[0]
-        except Exception as e:
-            raise e
-        return result
-
-    @property
-    def mothers(self):
-        # Only cells with more than three recorded births are counted as mothers
-        return np.where(np.count_nonzero(self["births"], axis=1) > 3)[0]
-
-    def daughters(self, mother_index):
-        """
-        Get daughters of cell with index `mother_index`.
-
-        :param mother_index: the index of the mother within the data. This is
-        different from the mother's pos/trap/cell identity.
-        """
-        daughter_ids = np.unique(self["daughterLabel"][mother_index]).tolist()
-        if 0 in daughter_ids:
-            daughter_ids.remove(0)
-        mother_pos_trap = self.identity[["position", "trapNum"]].loc[mother_index]
-        daughters = []
-        for cellNum in daughter_ids:
-            try:
-                daughters.append(self.index(*mother_pos_trap, cellNum))
-            except IndexError:
-                continue
-        return daughters
diff --git a/pcore/io/metadata_parser.py b/pcore/io/metadata_parser.py
deleted file mode 100644
index 81938152..00000000
--- a/pcore/io/metadata_parser.py
+++ /dev/null
@@ -1,77 +0,0 @@
-"""
-Parse microscopy log files according to specified JSON grammars.
-Produces dictionary to include in HDF5
-"""
-import glob
-import os
-import numpy as np
-import pandas as pd
-from datetime import datetime
-from pytz import timezone
-
-from logfile_parser import Parser
-
-# Paradigm: able to do something with all datatypes present in log files,
-# then pare down on what specific information is really useful later.
-
-# Needed because HDF5 attributes do not support dictionaries
-def flatten_dict(nested_dict, separator='/'):
-    '''
-    Flattens nested dictionary
-    '''
-    df = pd.json_normalize(nested_dict, sep=separator)
-    return df.to_dict(orient='records')[0]
-
-# Needed because HDF5 attributes do not support datetime objects
-# Takes care of time zones & daylight saving
-def datetime_to_timestamp(time, locale = 'Europe/London'):
-    '''
-    Convert datetime object to UNIX timestamp
-    '''
-    return timezone(locale).localize(time).timestamp()
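-
-# Illustrative behaviour of the two helpers above (the values are made up):
-#
-#     flatten_dict({'channels': {'GFP': {'exposure': 30}}})
-#     # -> {'channels/GFP/exposure': 30}
-#
-#     datetime_to_timestamp(datetime(2021, 12, 17, 11, 36))
-#     # -> 1639740960.0 (UNIX seconds; Europe/London is UTC+0 in December)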
-
-def find_file(root_dir, regex):
-    file = glob.glob(os.path.join(str(root_dir), regex))
-    if len(file) != 1:
-        return None
-    else:
-        return file[0]
-
-# TODO: re-write this as a class if appropriate
-# WARNING: grammars depend on the directory structure of a locally installed
-# logfile_parser repo
-def parse_logfiles(root_dir,
-                   acq_grammar = 'multiDGUI_acq_format.json',
-                   log_grammar = 'multiDGUI_log_format.json'):
-    '''
-    Parse acq and log files depending on the grammar specified, then merge into
-    single dict.
-    '''
-    # Both acq and log files contain useful information.
-    #ACQ_FILE = 'flavin_htb2_glucose_long_ramp_DelftAcq.txt'
-    #LOG_FILE = 'flavin_htb2_glucose_long_ramp_Delftlog.txt'
-    log_parser = Parser(log_grammar)
-    log_file = find_file(root_dir, '*log.txt')
-    if log_file is None:
-        # find_file returns None rather than raising when no unique match exists
-        raise ValueError('Experiment log file not found.')
-    with open(log_file, 'r') as f:
-        log_parsed = log_parser.parse(f)
-
-    acq_parser = Parser(acq_grammar)
-    acq_file = find_file(root_dir, '*[Aa]cq.txt')
-    if acq_file is None:
-        raise ValueError('Experiment acq file not found.')
-    with open(acq_file, 'r') as f:
-        acq_parsed = acq_parser.parse(f)
-
-    parsed = {**acq_parsed, **log_parsed}
-
-    for key, value in parsed.items():
-        if isinstance(value, datetime):
-            parsed[key] = datetime_to_timestamp(value)
-
-    parsed_flattened = flatten_dict(parsed)
-
-    return parsed_flattened
diff --git a/pcore/io/omero.py b/pcore/io/omero.py
deleted file mode 100644
index b0e5eb65..00000000
--- a/pcore/io/omero.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import h5py
-import omero
-from omero.gateway import BlitzGateway
-from pcore.experiment import get_data_lazy
-from pcore.cells import CellsHDF
-
-
-class Argo:
-    # TODO use the one in extraction?
-    def __init__(
-        self, host="islay.bio.ed.ac.uk", username="upload", password="***REMOVED***"
-    ):
-        self.conn = None
-        self.host = host
-        self.username = username
-        self.password = password
-
-    def get_meta(self):
-        pass
-
-    def __enter__(self):
-        self.conn = BlitzGateway(
-            host=self.host, username=self.username, passwd=self.password
-        )
-        self.conn.connect()
-        return self
-
-    def __exit__(self, *exc):
-        self.conn.close()
-        return False
-
-
-class Dataset(Argo):
-    def __init__(self, expt_id):
-        super().__init__()
-        self.expt_id = expt_id
-        self._files = None
-        self._tags = None
-
-    @property
-    def dataset(self):
-        return self.conn.getObject("Dataset", self.expt_id)
-
-    @property
-    def name(self):
-        return self.dataset.getName()
-
-    @property
-    def date(self):
-        return self.dataset.getDate()
-
-    @property
-    def unique_name(self):
-        return "_".join((self.date.strftime("%Y_%m_%d").replace("/", "_"), self.name))
-
-    def get_images(self):
-        return {im.getName(): im.getId() for im in self.dataset.listChildren()}
-
-    @property
-    def files(self):
-        if self._files is None:
-            self._files = {
-                x.getFileName(): x
-                for x in self.dataset.listAnnotations()
-                if isinstance(x, omero.gateway.FileAnnotationWrapper)
-            }
-        return self._files
-
-    @property
-    def tags(self):
-        if self._tags is None:
-            self._tags = {
-                x.getName(): x
-                for x in self.dataset.listAnnotations()
-                if isinstance(x, omero.gateway.TagAnnotationWrapper)
-            }
-        return self._tags
-
-    def cache_logs(self, root_dir):
-        for name, annotation in self.files.items():
-            filepath = root_dir / annotation.getFileName().replace("/", "_")
-            if str(filepath).endswith("txt") and not filepath.exists():
-                # Save only the text files
-                with open(str(filepath), "wb") as fd:
-                    for chunk in annotation.getFileInChunks():
-                        fd.write(chunk)
-        return True
-
-
-class Image(Argo):
-    def __init__(self, image_id):
-        super().__init__()
-        self.image_id = image_id
-        self._image_wrap = None
-
-    @property
-    def image_wrap(self):
-        # TODO check that it is alive/ connected
-        if self._image_wrap is None:
-            self._image_wrap = self.conn.getObject("Image", self.image_id)
-        return self._image_wrap
-
-    @property
-    def name(self):
-        return self.image_wrap.getName()
-
-    @property
-    def data(self):
-        return get_data_lazy(self.image_wrap)
-
-    @property
-    def metadata(self):
-        meta = dict()
-        meta["size_x"] = self.image_wrap.getSizeX()
-        meta["size_y"] = self.image_wrap.getSizeY()
-        meta["size_z"] = self.image_wrap.getSizeZ()
-        meta["size_c"] = self.image_wrap.getSizeC()
-        meta["size_t"] = self.image_wrap.getSizeT()
-        meta["channels"] = self.image_wrap.getChannelLabels()
-        meta["name"] = self.image_wrap.getName()
-        return meta
-
-
-class Cells(CellsHDF):
-    def __init__(self, filename):
-        file = h5py.File(filename, "r")
-        super().__init__(file)
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, *exc):
-        self.close()
-        return False
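-
-
-# Minimal usage sketch, assuming a valid OMERO dataset id and that the
-# credentials baked into Argo are reachable from the running host:
-#
-#     with Dataset(expt_id=10421) as dataset:
-#         image_ids = dataset.get_images()
-#     with Image(list(image_ids.values())[0]) as image:
-#         print(image.metadata["channels"])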
diff --git a/pcore/io/signal.py b/pcore/io/signal.py
deleted file mode 100644
index d2816a5a..00000000
--- a/pcore/io/signal.py
+++ /dev/null
@@ -1,234 +0,0 @@
-import numpy as np
-from copy import copy
-from itertools import accumulate
-
-from numpy import ndarray
-
-# from more_itertools import first_true
-
-import h5py
-import pandas as pd
-from utils_find_1st import find_1st, cmp_larger
-
-from pcore.io.base import BridgeH5
-
-
-class Signal(BridgeH5):
-    """
-    Class that fetches data from the hdf5 storage for post-processing
-    """
-
-    def __init__(self, file):
-        super().__init__(file, flag=None)
-
-        self.names = ["experiment", "position", "trap"]
-
-    @staticmethod
-    def add_name(df, name):
-        df.name = name
-        return df
-
-    def mothers(self, signal, cutoff=0.8):
-        df = self[signal]
-        get_mothers = lambda df: df.loc[df.notna().sum(axis=1) > df.shape[1] * cutoff]
-        if isinstance(df, pd.DataFrame):
-            return get_mothers(df)
-        elif isinstance(df, list):
-            return [get_mothers(d) for d in df]
-
-    def __getitem__(self, dsets):
-
-        if isinstance(dsets, str) and (
-            dsets.startswith("postprocessing")
-            or dsets.startswith("/postprocessing")
-            or dsets.endswith("imBackground")
-        ):
-            df = self.get_raw(dsets)
-
-        elif isinstance(dsets, str):
-            df = self.apply_prepost(dsets)
-
-        elif isinstance(dsets, list):
-            is_bgd = [dset.endswith("imBackground") for dset in dsets]
-            assert sum(is_bgd) == 0 or sum(is_bgd) == len(
-                dsets
-            ), "Trap data and cell data can't be mixed"
-            with h5py.File(self.filename, "r") as f:
-                return [self.add_name(self.apply_prepost(dset), dset) for dset in dsets]
-
-        return self.add_name(df, dsets)
-
-    def apply_prepost(self, dataset: str):
-        merges = self.get_merges()
-        with h5py.File(self.filename, "r") as f:
-            df = self.dset_to_df(f, dataset)
-
-            merged = df
-            if merges.any():
-                # Split in two dfs, one with rows relevant for merging and one without them
-                mergable_ids = pd.MultiIndex.from_arrays(
-                    np.unique(merges.reshape(-1, 2), axis=0).T,
-                    names=df.index.names,
-                )
-                merged = self.apply_merge(df.loc[mergable_ids], merges)
-
-                nonmergable_ids = df.index.difference(mergable_ids)
-
-                merged = pd.concat(
-                    (merged, df.loc[nonmergable_ids]), names=df.index.names
-                )
-
-            search = lambda a, b: np.where(
-                np.in1d(
-                    np.ravel_multi_index(a.T, a.max(0) + 1),
-                    np.ravel_multi_index(b.T, a.max(0) + 1),
-                )
-            )
-            if "modifiers/picks" in f:
-                picks = self.get_picks(names=merged.index.names)
-                missing_cells = [i for i in picks if tuple(i) not in set(merged.index)]
-
-                if picks:
-                    # return merged.loc[
-                    #     set(picks).intersection([tuple(x) for x in merged.index])
-                    # ]
-                    return merged.loc[picks]
-                else:
-                    if isinstance(merged.index, pd.MultiIndex):
-                        empty_lvls = [[] for i in merged.index.names]
-                        index = pd.MultiIndex(
-                            levels=empty_lvls,
-                            codes=empty_lvls,
-                            names=merged.index.names,
-                        )
-                    else:
-                        index = pd.Index([], name=merged.index.name)
-                    merged = pd.DataFrame([], index=index)
-            return merged
-
-    @property
-    def datasets(self):
-        with h5py.File(self.filename, "r") as f:
-            dsets = f.visititems(self._if_ext_or_post)
-        return dsets
-
-    def get_merged(self, dataset):
-        return self.apply_prepost(dataset, skip_pick=True)
-
-    @property
-    def merges(self):
-        with h5py.File(self.filename, "r") as f:
-            dsets = f.visititems(self._if_merges)
-        return dsets
-
-    @property
-    def n_merges(self):
-        print("{} merge events".format(len(self.merges)))
-
-    @property
-    def picks(self):
-        with h5py.File(self.filename, "r") as f:
-            dsets = f.visititems(self._if_picks)
-        return dsets
-
-    def apply_merge(self, df, changes):
-        if len(changes):
-
-            for target, source in changes:
-                df.loc[tuple(target)] = self.join_tracks_pair(
-                    df.loc[tuple(target)], df.loc[tuple(source)]
-                )
-                df.drop(tuple(source), inplace=True)
-
-        return df
-
-    def get_raw(self, dataset):
-        if isinstance(dataset, str):
-            with h5py.File(self.filename, "r") as f:
-                return self.dset_to_df(f, dataset)
-        elif isinstance(dataset, list):
-            return [self.get_raw(dset) for dset in dataset]
-
-    def get_merges(self):
-        # fetch merge events going up to the first level
-        with h5py.File(self.filename, "r") as f:
-            merges = f.get("modifiers/merges", np.array([]))
-            if not isinstance(merges, np.ndarray):
-                merges = merges[()]
-
-        return merges
-
-    # def get_picks(self, levels):
-    def get_picks(self, names, path="modifiers/picks/"):
-        with h5py.File(self.filename, "r") as f:
-            if path in f:
-                return list(zip(*[f[path + name] for name in names]))
-                # return f["modifiers/picks"]
-            else:
-                return None
-
-    def dset_to_df(self, f, dataset):
-        dset = f[dataset]
-        names = copy(self.names)
-        if not dataset.endswith("imBackground"):
-            names.append("cell_label")
-        lbls = {lbl: dset[lbl][()] for lbl in names if lbl in dset.keys()}
-        index = pd.MultiIndex.from_arrays(
-            list(lbls.values()), names=names[-len(lbls) :]
-        )
-
-        columns = (
-            dset["timepoint"][()] if "timepoint" in dset else dset.attrs["columns"]
-        )
-
-        df = pd.DataFrame(dset[("values")][()], index=index, columns=columns)
-
-        return df
-
-    @staticmethod
-    def dataset_to_df(f: h5py.File, path: str, mode: str = "h5py"):
-
-        if mode == "h5py":
-            all_indices = ["experiment", "position", "trap", "cell_label"]
-            indices = {k: f[path][k][()] for k in all_indices if k in f[path].keys()}
-            return pd.DataFrame(
-                f[path + "/values"][()],
-                index=pd.MultiIndex.from_arrays(
-                    list(indices.values()), names=indices.keys()
-                ),
-                columns=f[path + "/timepoint"][()],
-            )
-
-    @staticmethod
-    def _if_ext_or_post(name, *args):
-        flag = False
-        if name.startswith("extraction") and len(name.split("/")) == 4:
-            flag = True
-        elif name.startswith("postprocessing") and len(name.split("/")) == 3:
-            flag = True
-
-        if flag:
-            print(name)
-
-    @staticmethod
-    def _if_merges(name: str, obj):
-        if isinstance(obj, h5py.Dataset) and name.startswith("modifiers/merges"):
-            return obj[()]
-
-    @staticmethod
-    def _if_picks(name: str, obj):
-        if isinstance(obj, h5py.Group) and name.endswith("picks"):
-            return obj[()]
-
-    @staticmethod
-    def join_tracks_pair(target, source):
-        tgt_copy = copy(target)
-        end = find_1st(target.values[::-1], 0, cmp_larger)
-        tgt_copy.iloc[-end:] = source.iloc[-end:].values
-        return tgt_copy
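-
-
-# Minimal usage sketch (the file name and dataset path are hypothetical):
-#
-#     signal = Signal("pos001.h5")
-#     signal.datasets                 # prints the extraction/postprocessing paths
-#     df = signal["extraction/general/None/volume"]
-#     # `df` is indexed by (trap, cell_label) with the merges and picks
-#     # recorded under "modifiers/" already applied.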
diff --git a/pcore/io/utils.py b/pcore/io/utils.py
deleted file mode 100644
index a1029e82..00000000
--- a/pcore/io/utils.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import re
-import struct
-
-
-def clean_ascii(text):
-    return re.sub(r'[^\x20-\x7F]', '.', text)
-
-
-def xxd(x, start=0, stop=None):
-    if stop is None:
-        stop = len(x)
-    for i in range(start, stop, 8):
-        # Row number
-        print("%04d" % i, end="   ")
-        # Hexadecimal bytes
-        for r in range(i, i + 8):
-            print("%02x" % x[r], end="")
-            if (r + 1) % 4 == 0:
-                print("  ", end="")
-        # ASCII
-        print("   ", clean_ascii(x[i:i + 8].decode('utf-8', errors='ignore')),
-              "   ", end="")
-        # Int32
-        print('{:>10} {:>10}'.format(*struct.unpack('II', x[i: i + 8])),
-              end="   ")
-        print("")  # Newline
-    return
-
-
-# Buffer reading functions
-def read_int(buffer, n=1):
-    res = struct.unpack('I' * n, buffer.read(4 * n))
-    if n == 1:
-        res = res[0]
-    return res
-
-
-def read_string(buffer):
-    return ''.join([x.decode() for x in iter(lambda: buffer.read(1), b'\x00')])
-
-
-def read_delim(buffer, n):
-    delim = read_int(buffer, n)
-    assert all([x == 0 for x in delim]), "Unknown nonzero value in delimiter"
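-
-
-# Illustrative behaviour of the buffer readers above:
-#
-#     from io import BytesIO
-#     buf = BytesIO(b"\x05\x00\x00\x00abc\x00")
-#     read_int(buf)      # -> 5 (native-endian uint32; 5 on little-endian hosts)
-#     read_string(buf)   # -> 'abc' (bytes up to the NUL terminator)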
diff --git a/pcore/io/writer.py b/pcore/io/writer.py
deleted file mode 100644
index 2da1253c..00000000
--- a/pcore/io/writer.py
+++ /dev/null
@@ -1,567 +0,0 @@
-import itertools
-import logging
-from time import perf_counter
-
-import h5py
-import numpy as np
-import pandas as pd
-from collections.abc import Iterable
-from typing import Dict
-
-from utils_find_1st import find_1st, cmp_equal
-
-from pcore.io.base import BridgeH5
-from pcore.utils import timed
-
-
-#################### Dynamic version ##################################
-
-
-def load_attributes(file: str, group="/"):
-    with h5py.File(file, "r") as f:
-        meta = dict(f[group].attrs.items())
-    return meta
-
-
-class DynamicWriter:
-    datatypes = {}  # mapping: key -> (max_shape, dtype), defined by subclasses
-    group = ""
-    compression = None
-
-    def __init__(self, file: str):
-        self.file = file
-        self.metadata = load_attributes(file)
-
-    def _append(self, data, key, hgroup):
-        """Append data to existing dataset."""
-        try:
-            n = len(data)
-        except:
-            # Attributes have no length
-            n = 1
-        if key not in hgroup:
-            # TODO Include sparsity check
-            max_shape, dtype = self.datatypes[key]
-            shape = (n,) + max_shape[1:]
-            hgroup.create_dataset(
-                key,
-                shape=shape,
-                maxshape=max_shape,
-                dtype=dtype,
-                compression=self.compression,
-            )
-            hgroup[key][()] = data
-        else:
-            # The dataset already exists, expand it
-
-            try:  # FIXME This is broken by bugged mother-bud assignment
-                dset = hgroup[key]
-                dset.resize(dset.shape[0] + n, axis=0)
-                dset[-n:] = data
-            except:
-                logging.debug(
-                    "DynamicWriter:Inconsistency between dataset shape and new empty data"
-                )
-        return
-
-    def _overwrite(self, data, key, hgroup):
-        """Overwrite existing dataset with new data"""
-        # We do not append to mother_assign; raise error if already saved
-        n = len(data)
-        max_shape, dtype = self.datatypes[key]
-        if key in hgroup:
-            del hgroup[key]
-        hgroup.require_dataset(
-            key, shape=(n,), dtype=dtype, compression=self.compression
-        )
-        hgroup[key][()] = data
-
-    def _check_key(self, key):
-        if key not in self.datatypes:
-            raise KeyError(f"No defined data type for key {key}")
-
-    def write(self, data, overwrite: list):
-        # Data is a dictionary, if not, make it one
-        # Overwrite data is a dictionary
-        with h5py.File(self.file, "a") as store:
-            hgroup = store.require_group(self.group)
-
-            for key, value in data.items():
-                # We're only saving data that has a pre-defined data-type
-                self._check_key(key)
-                try:
-                    if key.startswith("attrs/"):  # metadata
-                        key = key.split("/")[1]  # First thing after attrs
-                        hgroup.attrs[key] = value
-                    elif key in overwrite:
-                        self._overwrite(value, key, hgroup)
-                    else:
-                        self._append(value, key, hgroup)
-                except Exception as e:
-                    print(key, value)
-                    raise (e)
-        return
-
-
-##################### Special instances #####################
-class TilerWriter(DynamicWriter):
-    datatypes = {
-        "trap_locations": ((None, 2), np.uint16),
-        "drifts": ((None, 2), np.float32),
-        "attrs/tile_size": ((1,), np.uint16),
-        "attrs/max_size": ((1,), np.uint16),
-    }
-    group = "trap_info"
-
-
-tile_size = 117
-
-
-@timed()
-def save_complex(array, dataset):
-    # Dataset needs to be 2D
-    n = len(array)
-    if n > 0:
-        dataset.resize(dataset.shape[0] + n, axis=0)
-        dataset[-n:, 0] = array.real
-        dataset[-n:, 1] = array.imag
-
-
-@timed()
-def load_complex(dataset):
-    array = dataset[:, 0] + 1j * dataset[:, 1]
-    return array
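-
-
-# save_complex/load_complex pack (trap, cell_label) index pairs into a single
-# complex-valued dataset: the real part holds the trap id and the imaginary
-# part the cell label. For example (hypothetical ids):
-#
-#     indices = np.array([3 + 1j, 3 + 2j, 7 + 1j])  # trap 3/cells 1-2, trap 7/cell 1
-#
-# BabyWriter uses this to keep the "indices" dataset aligned with the rows of
-# the "values" dataset inside the edgemasks group.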
-
-
-class BabyWriter(DynamicWriter):
-    compression = "gzip"
-    max_ncells = 2e5  # Could just make this None
-    max_tps = 1e3  # Could just make this None
-    chunk_cells = 25  # The number of cells in a chunk for edge masks
-    default_tile_size = 117
-    datatypes = {
-        "centres": ((None, 2), np.uint16),
-        "position": ((None,), np.uint16),
-        "angles": ((None,), h5py.vlen_dtype(np.float32)),
-        "radii": ((None,), h5py.vlen_dtype(np.float32)),
-        "edgemasks": ((max_ncells, max_tps, tile_size, tile_size), np.bool),
-        "ellipse_dims": ((None, 2), np.float32),
-        "cell_label": ((None,), np.uint16),
-        "trap": ((None,), np.uint16),
-        "timepoint": ((None,), np.uint16),
-        "mother_assign": ((None,), h5py.vlen_dtype(np.uint16)),
-        "mother_assign_dynamic": ((None,), np.uint16),
-        "volumes": ((None,), np.float32),
-    }
-    group = "cell_info"
-
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        # Get max_tps and trap info
-        self._traps_initialised = False
-
-    def __init_trap_info(self):
-        # Should only be run after the traps have been initialised
-        trap_metadata = load_attributes(self.file, "trap_info")
-        tile_size = trap_metadata.get("tile_size", self.default_tile_size)
-        max_tps = self.metadata["time_settings/ntimepoints"][0]
-        self.datatypes["edgemasks"] = (
-            (self.max_ncells, max_tps, tile_size, tile_size),
-            np.bool,
-        )
-        self._traps_initialised = True
-
-    def __init_edgemasks(self, hgroup, edgemasks, current_indices, n_cells):
-        # Create values dataset
-        # This holds the edge masks directly and
-        # Is of shape (n_tps, n_cells, tile_size, tile_size)
-        key = "edgemasks"
-        max_shape, dtype = self.datatypes[key]
-        shape = (n_cells, 1) + max_shape[2:]
-        chunks = (self.chunk_cells, 1) + max_shape[2:]
-        val_dset = hgroup.create_dataset(
-            "values",
-            shape=shape,
-            maxshape=max_shape,
-            dtype=dtype,
-            chunks=chunks,
-            compression=self.compression,
-        )
-        val_dset[:, 0] = edgemasks
-        # Create index dataset
-        # Holds the (trap, cell_id) description used to index into the
-        # values and is of shape (n_cells, 2)
-        ix_max_shape = (max_shape[0], 2)
-        ix_shape = (0, 2)
-        ix_dtype = np.uint16
-        ix_dset = hgroup.create_dataset(
-            "indices",
-            shape=ix_shape,
-            maxshape=ix_max_shape,
-            dtype=ix_dtype,
-            compression=self.compression,
-        )
-        save_complex(current_indices, ix_dset)
-
-    def __append_edgemasks(self, hgroup, edgemasks, current_indices):
-        key = "edgemasks"
-        val_dset = hgroup["values"]
-        ix_dset = hgroup["indices"]
-        existing_indices = load_complex(ix_dset)
-        # Check if there are any new labels
-        available = np.in1d(current_indices, existing_indices)
-        missing = current_indices[~available]
-        all_indices = np.concatenate([existing_indices, missing])
-        # Resizing
-        t = perf_counter()
-        n_tps = val_dset.shape[1] + 1
-        n_add_cells = len(missing)
-        # RESIZE DATASET FOR TIME and Cells
-        new_shape = (val_dset.shape[0] + n_add_cells, n_tps) + val_dset.shape[2:]
-        val_dset.resize(new_shape)
-        logging.debug(f"Timing:resizing:{perf_counter() - t}")
-        # Writing data
-        cell_indices = np.where(np.in1d(all_indices, current_indices))[0]
-        for ix, mask in zip(cell_indices, edgemasks):
-            try:
-                val_dset[ix, n_tps - 1] = mask
-            except Exception as e:
-                logging.debug(f"{ix}, {n_tps}, {val_dset.shape}")
-        # Save the index values
-        save_complex(missing, ix_dset)
-
-    def write_edgemasks(self, data, keys, hgroup):
-        if not self._traps_initialised:
-            self.__init_trap_info()
-        # DATA is TRAP_IDS, CELL_LABELS, EDGEMASKS in a structured array
-        key = "edgemasks"
-        val_key = "values"
-        idx_key = "indices"
-        # Length of edgemasks
-        traps, cell_labels, edgemasks = data
-        n_cells = len(cell_labels)
-        hgroup = hgroup.require_group(key)
-        current_indices = np.array(traps) + 1j * np.array(cell_labels)
-        if val_key not in hgroup:
-            self.__init_edgemasks(hgroup, edgemasks, current_indices, n_cells)
-        else:
-            self.__append_edgemasks(hgroup, edgemasks, current_indices)
-
-    def write(self, data, overwrite: list):
-        with h5py.File(self.file, "a") as store:
-            hgroup = store.require_group(self.group)
-
-            for key, value in data.items():
-                # We're only saving data that has a pre-defined data-type
-                self._check_key(key)
-                try:
-                    if key.startswith("attrs/"):  # metadata
-                        key = key.split("/")[1]  # First thing after attrs
-                        hgroup.attrs[key] = value
-                    elif key in overwrite:
-                        self._overwrite(value, key, hgroup)
-                    elif key == "edgemasks":
-                        keys = ["trap", "cell_label", "edgemasks"]
-                        value = [data[x] for x in keys]
-                        self.write_edgemasks(value, keys, hgroup)
-                    else:
-                        self._append(value, key, hgroup)
-                except Exception as e:
-                    print(key, value)
-                    raise (e)
-        return
-
-
-#################### Extraction version ###############################
-class Writer(BridgeH5):
-    """
-    Class in charge of transforming data into compatible formats
-
-    Decoupling interface from implementation!
-
-    Parameters
-    ----------
-        filename: str
-            Name of the file to write into.
-        compression: str, default=None
-            Compression method passed on to h5py writing functions (only used
-            for dataframes and other array-like data). Defaults to "gzip".
-    """
-
-    def __init__(self, filename, compression=None):
-        super().__init__(filename, flag=None)
-
-        if compression is None:
-            compression = "gzip"
-        self.compression = compression
-
-    def write(
-        self,
-        path: str,
-        data: Iterable = None,
-        meta: Dict = {},
-        overwrite: str = None,
-    ):
-        """
-        Parameters
-        ----------
-        path : str
-            Path inside h5 file to write into.
-        data : Iterable, default = None
-        meta : Dict, default = {}
-
-
-        """
-        self.id_cache = {}
-        with h5py.File(self.filename, "a") as f:
-            if overwrite == "overwrite":  # TODO refactor overwriting
-                if path in f:
-                    del f[path]
-            elif overwrite == "accumulate":  # Add a number if needed
-                if path in f:
-                    parent, name = path.rsplit("/", maxsplit=1)
-                    n = sum([x.startswith(name) for x in f[path]])
-                    path = path + str(n).zfill(3)
-            elif overwrite == "skip":
-                if path in f:
-                    logging.debug("Skipping dataset {}".format(path))
-
-            logging.debug(
-                "{} {} to {} and {} metadata fields".format(
-                    overwrite, type(data), path, len(meta)
-                )
-            )
-            if data is not None:
-                self.write_dset(f, path, data)
-            if meta:
-                for attr, metadata in meta.items():
-                    self.write_meta(f, path, attr, data=metadata)
-
-    def write_dset(self, f: h5py.File, path: str, data: Iterable):
-        if isinstance(data, pd.DataFrame):
-            self.write_pd(f, path, data, compression=self.compression)
-        elif isinstance(data, pd.MultiIndex):
-            self.write_index(f, path, data)  # , compression=self.compression)
-        elif isinstance(data, Iterable):
-            self.write_arraylike(f, path, data)
-        else:
-            self.write_atomic(data, f, path)
-
-    def write_meta(self, f: h5py.File, path: str, attr: str, data: Iterable):
-        obj = f.require_group(path)
-
-        obj.attrs[attr] = data
-
-    @staticmethod
-    def write_arraylike(f: h5py.File, path: str, data: Iterable, **kwargs):
-        if path in f:
-            del f[path]
-
-        narray = np.array(data)
-
-        chunks = None
-        if narray.any():
-            chunks = (1, *narray.shape[1:])
-
-        dset = f.create_dataset(
-            path,
-            shape=narray.shape,
-            chunks=chunks,
-            dtype="int",
-            compression=kwargs.get("compression", None),
-        )
-        dset[()] = narray
-
-    @staticmethod  # TODO Use this function to implement Diane's dynamic writer
-    def write_dynamic(f: h5py.File, path: str, data: Iterable):
-        pass
-
-    @staticmethod
-    def write_index(f, path, pd_index, **kwargs):
-        f.require_group(path)  # TODO check if we can remove this
-        for i, name in enumerate(pd_index.names):
-            ids = pd_index.get_level_values(i)
-            id_path = path + "/" + name
-            f.create_dataset(
-                name=id_path,
-                shape=(len(ids),),
-                dtype="uint16",
-                compression=kwargs.get("compression", None),
-            )
-            indices = f[id_path]
-            indices[()] = ids
-
-    def write_pd(self, f, path, df, **kwargs):
-        values_path = path + "values" if path.endswith("/") else path + "/values"
-        if path not in f:
-            max_ncells = 2e5
-
-            max_tps = 1e3
-            f.create_dataset(
-                name=values_path,
-                shape=df.shape,
-                # chunks=(min(df.shape[0], 1), df.shape[1]),
-                # dtype=df.dtypes.iloc[0], This is making NaN in ints into negative vals
-                dtype="float",
-                maxshape=(max_ncells, max_tps),
-                compression=kwargs.get("compression", None),
-            )
-            dset = f[values_path]
-            dset[()] = df.values
-
-            for name in df.index.names:
-                indices_path = "/".join((path, name))
-                f.create_dataset(
-                    name=indices_path,
-                    shape=(len(df),),
-                    dtype="uint16",  # Assuming we'll always use int indices
-                    chunks=True,
-                    maxshape=(max_ncells,),
-                )
-                dset = f[indices_path]
-                dset[()] = df.index.get_level_values(level=name).tolist()
-
-            if np.issubdtype(df.columns.dtype, np.integer):
-                tp_path = path + "/timepoint"
-                f.create_dataset(
-                    name=tp_path,
-                    shape=(df.shape[1],),
-                    maxshape=(max_tps,),
-                    dtype="uint16",
-                )
-                tps = df.columns.tolist()
-                f[tp_path][()] = tps
-            else:
-                f[path].attrs["columns"] = df.columns.tolist()
-        else:
-            dset = f[values_path]
-
-            # Filter out repeated timepoints
-            new_tps = set(df.columns)
-            if path + "/timepoint" in f:
-                new_tps = new_tps.difference(f[path + "/timepoint"][()])
-            df = df[sorted(new_tps)]
-
-            if (
-                not hasattr(self, "id_cache") or not df.index.nlevels in self.id_cache
-            ):  # Use cache dict to store previously-obtained indices
-                self.id_cache[df.index.nlevels] = {}
-                existing_ids = self.get_existing_ids(
-                    f, [path + "/" + x for x in df.index.names]
-                )
-                # Split indices in existing and additional
-                new = df.index.tolist()
-                if df.index.nlevels == 1:  # Cover for cases with a single index
-                    new = [(x,) for x in df.index.tolist()]
-                (
-                    found_multis,
-                    self.id_cache[df.index.nlevels]["additional_multis"],
-                ) = self.find_ids(
-                    existing=existing_ids,
-                    new=new,
-                )
-                found_indices = np.array(locate_indices(existing_ids, found_multis))
-
-                # We must sort our indices for h5py indexing
-                incremental_existing = np.argsort(found_indices)
-                self.id_cache[df.index.nlevels]["found_indices"] = found_indices[
-                    incremental_existing
-                ]
-                self.id_cache[df.index.nlevels]["found_multi"] = found_multis[
-                    incremental_existing
-                ]
-
-            existing_values = df.loc[
-                [
-                    _tuple_or_int(x)
-                    for x in self.id_cache[df.index.nlevels]["found_multi"]
-                ]
-            ].values
-            new_values = df.loc[
-                [
-                    _tuple_or_int(x)
-                    for x in self.id_cache[df.index.nlevels]["additional_multis"]
-                ]
-            ].values
-            ncells, ntps = f[values_path].shape
-
-            # Add found cells
-            dset.resize(dset.shape[1] + df.shape[1], axis=1)
-            dset[:, ntps:] = np.nan
-            for i, tp in enumerate(df.columns):
-                dset[
-                    self.id_cache[df.index.nlevels]["found_indices"], tp
-                ] = existing_values[:, i]
-            # Add new cells
-            n_newcells = len(self.id_cache[df.index.nlevels]["additional_multis"])
-            dset.resize(dset.shape[0] + n_newcells, axis=0)
-            dset[ncells:, :] = np.nan
-
-            for i, tp in enumerate(df.columns):
-                dset[ncells:, tp] = new_values[:, i]
-
-            # save indices
-            for i, name in enumerate(df.index.names):
-                tmp = path + "/" + name
-                dset = f[tmp]
-                n = dset.shape[0]
-                dset.resize(n + n_newcells, axis=0)
-                dset[n:] = (
-                    self.id_cache[df.index.nlevels]["additional_multis"][:, i]
-                    if len(self.id_cache[df.index.nlevels]["additional_multis"].shape)
-                    > 1
-                    else self.id_cache[df.index.nlevels]["additional_multis"]
-                )
-
-            tmp = path + "/timepoint"
-            dset = f[tmp]
-            n = dset.shape[0]
-            dset.resize(n + df.shape[1], axis=0)
-            dset[n:] = df.columns.tolist()
-
-    @staticmethod
-    def get_existing_ids(f, paths):
-        # Fetch indices and convert them to a (nentries, nlevels) ndarray
-        return np.array([f[path][()] for path in paths]).T
-
-    @staticmethod
-    def find_ids(existing, new):
-        # Compare two tuple sets and return the intersection and difference
-        # (elements in the 'new' set not in 'existing')
-        set_existing = set(tuple(x) for x in existing.tolist())
-        existing_cells = np.array(list(set_existing.intersection(new)))
-        new_cells = np.array(list(set(new).difference(set_existing)))
-
-        return (
-            existing_cells,
-            new_cells,
-        )
-
-
-def locate_indices(existing, new):
-    if new.any():
-        if new.shape[1] > 1:
-            return [
-                find_1st(
-                    (existing[:, 0] == n[0]) & (existing[:, 1] == n[1]), True, cmp_equal
-                )
-                for n in new
-            ]
-        else:
-            return [find_1st(existing[:, 0] == n, True, cmp_equal) for n in new]
-    else:
-        return []
-
-
-def _tuple_or_int(x):
-    # Convert tuple to int if it only contains one value
-    if len(x) == 1:
-        return x[0]
-    else:
-        return x
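For reference, the trap/cell bookkeeping in the removed writer encodes (trap, cell_label) pairs as complex numbers so that membership tests reduce to np.in1d on a flat array. A minimal standalone sketch of that trick (values are illustrative):

    import numpy as np

    # Encode (trap, cell_label) pairs as trap + 1j * cell_label.
    all_indices = np.array([0, 0, 1, 2]) + 1j * np.array([1, 2, 1, 1])   # already stored
    current_indices = np.array([0, 1, 3]) + 1j * np.array([2, 1, 1])     # this time point

    # Rows of the stored dataset that also appear at the current time point.
    cell_indices = np.where(np.in1d(all_indices, current_indices))[0]    # -> [1, 2]

    # Pairs seen now but not stored yet (the "missing" ones saved via save_complex above).
    missing = current_indices[~np.in1d(current_indices, all_indices)]    # -> [3.+1.j]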
diff --git a/pcore/multiexperiment.py b/pcore/multiexperiment.py
deleted file mode 100644
index a3ce094f..00000000
--- a/pcore/multiexperiment.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from pathos.multiprocessing import Pool
-
-from pcore.pipeline import PipelineParameters, Pipeline
-
-
-class MultiExp:
-    """
-    Manages cases when you need to segment several different experiments with a single
-    position (e.g. pH calibration).
-    """
-
-    def __init__(self, expt_ids, npools=8, *args, **kwargs):
-        self.expt_ids = expt_ids
-        self.npools = npools
-
-    def run(self):
-        def run_expt(expt_id):
-            return Pipeline(
-                PipelineParameters.default(
-                    general={"expt_id": expt_id, "distributed": 0}
-                )
-            ).run()
-
-        with Pool(self.npools) as p:
-            return p.map(run_expt, self.expt_ids)
-
-    @classmethod
-    def default(cls):
-        return cls(expt_ids=list(range(20448, 20467 + 1)))
diff --git a/pcore/pipeline.py b/pcore/pipeline.py
deleted file mode 100644
index 1fca4085..00000000
--- a/pcore/pipeline.py
+++ /dev/null
@@ -1,271 +0,0 @@
-"""
-Pipeline and chaining elements.
-"""
-import logging
-import os
-from abc import ABC, abstractmethod
-from typing import List
-from pathlib import Path
-import traceback
-
-import itertools
-import yaml
-from tqdm import tqdm
-from time import perf_counter
-
-import numpy as np
-import pandas as pd
-from pathos.multiprocessing import Pool
-
-from agora.base import ParametersABC, ProcessABC
-from pcore.experiment import MetaData
-from pcore.io.omero import Dataset, Image
-from pcore.haystack import initialise_tf
-from pcore.baby_client import BabyRunner, BabyParameters
-from pcore.segment import Tiler, TilerParameters
-from pcore.io.writer import TilerWriter, BabyWriter
-from pcore.io.signal import Signal
-from extraction.core.extractor import Extractor, ExtractorParameters
-from extraction.core.functions.defaults import exparams_from_meta
-from postprocessor.core.processor import PostProcessor, PostProcessorParameters
-
-
-class PipelineParameters(ParametersABC):
-    def __init__(self, general, tiler, baby, extraction, postprocessing):
-        self.general = general
-        self.tiler = tiler
-        self.baby = baby
-        self.extraction = extraction
-        self.postprocessing = postprocessing
-
-    @classmethod
-    def default(
-        cls,
-        general={},
-        tiler={},
-        baby={},
-        extraction={},
-        postprocessing={},
-    ):
-        """
-        Load unit test experiment
-        :expt_id: Experiment id
-        :directory: Output directory
-
-        Provides default parameters for the entire pipeline. This downloads the logfiles and sets the default
-        timepoints and extraction parameters from there.
-        """
-        expt_id = general.get("expt_id", 19993)
-        directory = Path(general.get("directory", "../data"))
-        with Dataset(int(expt_id)) as conn:
-            directory = directory / conn.unique_name
-            if not directory.exists():
-                directory.mkdir(parents=True)
-                # Download logs to use for metadata
-            conn.cache_logs(directory)
-        meta = MetaData(directory, None).load_logs()
-        tps = meta["time_settings/ntimepoints"][0]
-        defaults = {
-            "general": dict(
-                id=expt_id,
-                distributed=0,
-                tps=tps,
-                directory=directory,
-                strain="",
-                earlystop=dict(
-                    min_tp=0,
-                    thresh_pos_clogged=0.3,
-                    thresh_trap_clogged=7,
-                    ntps_to_eval=5,
-                ),
-            )
-        }
-        defaults["tiler"] = TilerParameters.default().to_dict()
-        defaults["baby"] = BabyParameters.default().to_dict()
-        defaults["extraction"] = exparams_from_meta(meta)
-        defaults["postprocessing"] = PostProcessorParameters.default().to_dict()
-        overrides = dict(general=general, tiler=tiler, baby=baby,
-                         extraction=extraction, postprocessing=postprocessing)
-        for k in defaults:
-            defaults[k].update(overrides[k])
-        return cls(**defaults)
-
-    def load_logs(self):
-        parsed_flattened = parse_logfiles(self.log_dir)
-        return parsed_flattened
-
-
-class Pipeline(ProcessABC):
-    """
-    A chained set of Pipeline elements connected through pipes.
-    """
-
-    # Tiling, Segmentation, Extraction and Postprocessing should use their own default parameters
-
-    # Early stop for clogging
-    earlystop = {
-        "min_tp": 50,
-        "thresh_pos_clogged": 0.3,
-        "thresh_trap_clogged": 7,
-        "ntps_to_eval": 5,
-    }
-
-    def __init__(self, parameters: PipelineParameters):
-        super().__init__(parameters)
-        self.store = self.parameters.general["directory"]
-
-    @classmethod
-    def from_yaml(cls, fpath):
-        # This is just a convenience function, think before implementing
-        # for other processes
-        return cls(parameters=PipelineParameters.from_yaml(fpath))
-
-    def run(self):
-        # Config holds the general information, use in main
-        # Steps holds the description of tasks with their parameters
-        # Steps: all holds general tasks
-        # steps: strain_name holds task for a given strain
-        config = self.parameters.to_dict()
-        expt_id = config["general"]["id"]
-        distributed = config["general"]["distributed"]
-        strain_filter = config["general"]["strain"]
-        root_dir = config["general"]["directory"]
-        root_dir = Path(root_dir)
-
-        print("Searching OMERO")
-        # Do all the initialisation
-        with Dataset(int(expt_id)) as conn:
-            image_ids = conn.get_images()
-            directory = root_dir / conn.unique_name
-            if not directory.exists():
-                directory.mkdir(parents=True)
-                # Download logs to use for metadata
-            conn.cache_logs(directory)
-
-        # Modify to the configuration
-        self.parameters.general["directory"] = directory
-        config["general"]["directory"] = directory
-
-        # Filter TODO integrate filter onto class and add regex
-        image_ids = {k: v for k, v in image_ids.items() if k.startswith(strain_filter)}
-
-        if distributed != 0:  # Gives the number of simultaneous processes
-            with Pool(distributed) as p:
-                results = p.map(lambda x: self.create_pipeline(x), image_ids.items())
-            return results
-        else:  # Sequential
-            results = []
-            for k, v in image_ids.items():
-                r = self.create_pipeline((k, v))
-                results.append(r)
-            return results
-
-    def create_pipeline(self, image_id):
-        config = self.parameters.to_dict()
-        name, image_id = image_id
-        general_config = config["general"]
-        session = None
-        earlystop = general_config["earlystop"]
-        try:
-            directory = general_config["directory"]
-            with Image(image_id) as image:
-                filename = f"{directory}/{image.name}.h5"
-                try:
-                    os.remove(filename)
-                except OSError:
-                    pass
-
-                # Run metadata first
-                process_from = 0
-                # if True:  # not Path(filename).exists():
-                meta = MetaData(directory, filename)
-                meta.run()
-                tiler = Tiler.from_image(
-                    image, TilerParameters.from_dict(config["tiler"])
-                )
-                # else: TODO add support to continue local experiments?
-                #     tiler = Tiler.from_hdf5(image.data, filename)
-                #     s = Signal(filename)
-                #     process_from = s["/general/None/extraction/volume"].columns[-1]
-                #     if process_from > 2:
-                #         process_from = process_from - 3
-                #         tiler.n_processed = process_from
-
-                writer = TilerWriter(filename)
-                session = initialise_tf(2)
-                runner = BabyRunner.from_tiler(
-                    BabyParameters.from_dict(config["baby"]), tiler
-                )
-                bwriter = BabyWriter(filename)
-                exparams = ExtractorParameters.from_dict(config["extraction"])
-                ext = Extractor.from_tiler(exparams, store=filename, tiler=tiler)
-
-                # RUN
-                tps = general_config["tps"]
-                frac_clogged_traps = 0
-                for i in tqdm(
-                    range(process_from, tps), desc=image.name, initial=process_from
-                ):
-                    if frac_clogged_traps < earlystop["thresh_pos_clogged"]:
-                        t = perf_counter()
-                        trap_info = tiler.run_tp(i)
-                        logging.debug(f"Timing:Trap:{perf_counter() - t}s")
-                        t = perf_counter()
-                        writer.write(trap_info, overwrite=[])
-                        logging.debug(f"Timing:Writing-trap:{perf_counter() - t}s")
-                        t = perf_counter()
-                        seg = runner.run_tp(i)
-                        logging.debug(f"Timing:Segmentation:{perf_counter() - t}s")
-                        # logging.debug(
-                        #     f"Segmentation failed:Segmentation:{perf_counter() - t}s"
-                        # )
-                        t = perf_counter()
-                        bwriter.write(seg, overwrite=["mother_assign"])
-                        logging.debug(f"Timing:Writing-baby:{perf_counter() - t}s")
-                        t = perf_counter()
-
-                        tmp = ext.run(tps=[i])
-                        logging.debug(f"Timing:Extraction:{perf_counter() - t}s")
-                    else:  # Stop if more than X% traps are clogged
-                        logging.debug(
-                            f"EarlyStop:{earlystop['thresh_pos_clogged']*100}% traps clogged at time point {i}"
-                        )
-                        print(
-                            f"Stopping analysis at time {i} with {frac_clogged_traps} clogged traps"
-                        )
-                        break
-
-                    if (
-                        i > earlystop["min_tp"]
-                    ):  # Calculate the fraction of clogged traps
-                        frac_clogged_traps = self.check_earlystop(filename, earlystop)
-                        logging.debug(f"Quality:Clogged_traps:{frac_clogged_traps}")
-                        print("Frac clogged traps: ", frac_clogged_traps)
-
-                # Run post processing
-                post_proc_params = PostProcessorParameters.from_dict(
-                    self.parameters.postprocessing
-                ).to_dict()
-                PostProcessor(filename, post_proc_params).run()
-                return True
-        except Exception as e:  # bug in the trap getting
-            print(f"Caught exception in worker thread (x = {name}):")
-            # This prints the type, value, and stack trace of the
-            # current exception being handled.
-            traceback.print_exc()
-            print()
-            raise e
-        finally:
-            if session:
-                session.close()
-
-    def check_earlystop(self, filename, es_parameters):
-        s = Signal(filename)
-        df = s["/extraction/general/None/area"]
-        frac_clogged_traps = (
-            df[df.columns[-1 - es_parameters["ntps_to_eval"] : -1]]
-            .dropna(how="all")
-            .notna()
-            .groupby("trap")
-            .apply(sum)
-            .apply(np.mean, axis=1)
-            > es_parameters["thresh_trap_clogged"]
-        ).mean()
-        return frac_clogged_traps
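The early-stop check above reduces to: over the last ntps_to_eval time points, count per trap how many cells still report data, and call a trap clogged when that mean count exceeds thresh_trap_clogged. A rough standalone sketch of the same computation on a toy signal (names and thresholds are illustrative):

    import numpy as np
    import pandas as pd

    ntps_to_eval, thresh_trap_clogged = 5, 7

    # Toy signal: rows indexed by (trap, cell_label), columns are time points.
    index = pd.MultiIndex.from_tuples(
        [(0, 1), (0, 2), (1, 1)], names=["trap", "cell_label"]
    )
    df = pd.DataFrame(np.random.rand(3, 10), index=index)

    frac_clogged_traps = (
        df[df.columns[-1 - ntps_to_eval : -1]]   # last few time points
        .dropna(how="all")
        .notna()
        .groupby("trap")
        .sum()            # cells with data, per trap and time point
        .mean(axis=1)     # averaged over the evaluated time points
        > thresh_trap_clogged
    ).mean()              # fraction of traps flagged as clogged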
diff --git a/pcore/post_processing.py b/pcore/post_processing.py
deleted file mode 100644
index 58481dd8..00000000
--- a/pcore/post_processing.py
+++ /dev/null
@@ -1,189 +0,0 @@
-"""
-Post-processing utilities
-
-Notes: I don't have statistics on ranges of radii for each of the knots in
-the radial spline representation, but we regularly extract the average of
-these radii for each cell. So, depending on camera/lens, we get:
-    * 60x evolve: mean radii of 2-14 pixels (and measured areas of 30-750
-    pixels^2)
-    * 60x prime95b: mean radii of 3-24 pixels (and measured areas of 60-2000
-	pixels^2)
-
-And I presume that for a 100x lens we would get an ~5/3 increase over those
-values.
-
-In terms of the current volume estimation method, it's currently only
-implemented in the AnalysisToolbox repository, but it's super simple:
-
-mVol = 4/3*pi*sqrt(mArea/pi).^3
-
-where mArea is simply the sum of pixels for that cell.
-"""
-import matplotlib.pyplot as plt
-import numpy as np
-from mpl_toolkits.mplot3d.art3d import Poly3DCollection
-from scipy import ndimage
-from skimage.morphology import erosion, ball
-from skimage import measure, draw
-
-
-def my_ball(radius):
-    """Generates a ball-shaped structuring element.
-
-    This is the 3D equivalent of a disk.
-    A pixel is within the neighborhood if the Euclidean distance between
-    it and the origin is no greater than radius.
-
-    Parameters
-    ----------
-    radius : int
-        The radius of the ball-shaped structuring element.
-
-    Returns
-    -------
-    selem : ndarray
-        The structuring element where elements of the neighborhood
-        are 1 and 0 otherwise.
-    """
-    n = 2 * radius + 1
-    Z, Y, X = np.mgrid[-radius:radius:n * 1j,
-              -radius:radius:n * 1j,
-              -radius:radius:n * 1j]
-    X **= 2
-    Y **= 2
-    Z **= 2
-    X += Y
-    X += Z
-    # s = X ** 2 + Y ** 2 + Z ** 2
-    return X <= radius * radius
-
-def circle_outline(r):
-    return ellipse_perimeter(r, r)
-
-def ellipse_perimeter(x, y):
-    im_shape = int(2*max(x, y) + 1)
-    img = np.zeros((im_shape, im_shape), dtype=np.uint8)
-    rr, cc = draw.ellipse_perimeter(int(im_shape//2), int(im_shape//2),
-                                    int(x), int(y))
-    img[rr, cc] = 1
-    return np.pad(img, 1)
-
-def capped_cylinder(x, y):
-    max_size = (y + 2*x + 2)
-    pixels = np.zeros((max_size, max_size))
-
-    rect_start = ((max_size-x)//2, x + 1)
-    rr, cc = draw.rectangle_perimeter(rect_start, extent=(x, y),
-                                     shape=(max_size, max_size))
-    pixels[rr, cc] = 1
-    circle_centres = [(max_size//2 - 1, x),
-                      (max_size//2 - 1, max_size - x - 1 )]
-    for r, c in circle_centres:
-        rr, cc = draw.circle_perimeter(r, c, (x + 1)//2,
-                                       shape=(max_size, max_size))
-        pixels[rr, cc] = 1
-    pixels = ndimage.morphology.binary_fill_holes(pixels)
-    pixels ^= erosion(pixels)
-    return pixels
-
-def volume_of_sphere(radius):
-    return 4 / 3 * np.pi * radius**3
-
-def plot_voxels(voxels):
-    verts, faces, normals, values = measure.marching_cubes_lewiner(
-        voxels, 0)
-    fig = plt.figure(figsize=(10, 10))
-    ax = fig.add_subplot(111, projection='3d')
-    mesh = Poly3DCollection(verts[faces])
-    mesh.set_edgecolor('k')
-    ax.add_collection3d(mesh)
-    ax.set_xlim(0, voxels.shape[0])
-    ax.set_ylim(0, voxels.shape[1])
-    ax.set_zlim(0, voxels.shape[2])
-    plt.tight_layout()
-    plt.show()
-
-# Volume estimation
-def union_of_spheres(outline, shape='my_ball', debug=False):
-    filled = ndimage.binary_fill_holes(outline)
-    nearest_neighbor = ndimage.morphology.distance_transform_edt(
-        outline == 0) * filled
-    voxels = np.zeros((filled.shape[0], filled.shape[1], max(filled.shape)))
-    c_z = voxels.shape[2] // 2
-    for x,y in zip(*np.where(filled)):
-        radius = nearest_neighbor[(x,y)]
-        if radius > 0:
-            if shape == 'ball':
-                b = ball(radius)
-            elif shape == 'my_ball':
-                b = my_ball(radius)
-            else:
-                raise ValueError(f"{shape} is not an accepted value for "
-                                 f"shape.")
-            centre_b = ndimage.measurements.center_of_mass(b)
-
-            I,J,K = np.ogrid[:b.shape[0], :b.shape[1], :b.shape[2]]
-            voxels[I + int(x - centre_b[0]), J + int(y - centre_b[1]),
-                   K + int(c_z - centre_b[2])] += b
-    if debug:
-        plot_voxels(voxels)
-    return voxels.astype(bool).sum()
-
-def improved_uos(outline, shape='my_ball', debug=False):
-    filled = ndimage.binary_fill_holes(outline)
-    nearest_neighbor = ndimage.morphology.distance_transform_edt(
-        outline == 0) * filled
-    voxels = np.zeros((filled.shape[0], filled.shape[1], max(filled.shape)))
-    c_z = voxels.shape[2] // 2
-
-    while np.any(nearest_neighbor != 0):
-        radius = np.max(nearest_neighbor)
-        x, y = np.argwhere(nearest_neighbor == radius)[0]
-        if shape == 'ball':
-            b = ball(np.ceil(radius))
-        elif shape == 'my_ball':
-            b = my_ball(np.ceil(radius))
-        else:
-            raise ValueError(f"{shape} is not an accepted value for shape")
-        centre_b = ndimage.measurements.center_of_mass(b)
-
-        I, J, K = np.ogrid[:b.shape[0], :b.shape[1], :b.shape[2]]
-        voxels[I + int(x - centre_b[0]), J + int(y - centre_b[1]),
-               K + int(c_z - centre_b[2])] += b
-
-        # Use the central disk of the ball from voxels to get the circle
-        # = 0 if nn[x,y] < r else nn[x,y]
-        rr, cc = draw.circle(x, y, np.ceil(radius), nearest_neighbor.shape)
-        nearest_neighbor[rr, cc] = 0
-    if debug:
-        plot_voxels(voxels)
-    return voxels.astype(bool).sum()
-
-def conical(outline, debug=False):
-    nearest_neighbor = ndimage.morphology.distance_transform_edt(
-        outline == 0) * ndimage.binary_fill_holes(outline)
-    if debug:
-        hf = plt.figure()
-        ha = hf.add_subplot(111, projection='3d')
-
-        X, Y = np.meshgrid(np.arange(nearest_neighbor.shape[0]),
-                           np.arange(nearest_neighbor.shape[1]))
-        ha.plot_surface(X, Y, nearest_neighbor)
-        plt.show()
-    return 4 * nearest_neighbor.sum()
-
-def volume(outline, method='spheres'):
-    if method=='conical':
-        return conical(outline)
-    elif method=='spheres':
-        return union_of_spheres(outline)
-    else:
-        raise ValueError(f"Method {method} not implemented.")
-
-def circularity(outline):
-    pass
\ No newline at end of file
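The volume estimate quoted in the module docstring above treats each cell as a sphere whose cross-section has the measured area: r = sqrt(mArea/pi), so mVol = 4/3 * pi * r**3. The same conversion in Python, as a small sketch:

    import numpy as np

    def volume_from_area(area_px2):
        # Spherical volume estimate from a measured cross-sectional area (pixels^2).
        radius = np.sqrt(area_px2 / np.pi)
        return 4 / 3 * np.pi * radius ** 3

    volume_from_area(300)  # a ~300 px^2 cell (60x evolve range) gives roughly 3.9e3 px^3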
diff --git a/pcore/results.py b/pcore/results.py
deleted file mode 100644
index fd12c283..00000000
--- a/pcore/results.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""Pipeline results classes and utilities"""
-
-
-class SegmentationResults:
-    """
-    Object storing the data from the Segmentation pipeline.
-    Everything is stored as an `AttributeDict`, which is a `defaultdict` where
-    you can get elements as attributes.
-
-    In addition, it implements:
-     - IO functionality (read from file, write to file)
-    """
-    def __init__(self, raw_expt):
-        pass
-
-
-
-
-class CellResults:
-    """
-    Results on a set of cells TODO: what set of cells, how?
-
-    Contains:
-    * cellInf describing which cells are taken into account
-    * annotations on the cell
-    * segmentation maps of the cell TODO: how to define and save this?
-    * trapLocations TODO: why is this not part of cellInf?
-    """
-
-    def __init__(self, cellInf=None, annotations=None, segmentation=None,
-                 trapLocations=None):
-        self._cellInf = cellInf
-        self._annotations = annotations
-        self._segmentation = segmentation
-        self._trapLocations = trapLocations
diff --git a/pcore/segment.py b/pcore/segment.py
deleted file mode 100644
index 8565b9c5..00000000
--- a/pcore/segment.py
+++ /dev/null
@@ -1,344 +0,0 @@
-"""Segment/segmented pipelines.
-Includes splitting the image into traps/parts,
-cell segmentation, nucleus segmentation."""
-import warnings
-from functools import lru_cache
-
-import h5py
-import numpy as np
-
-from pathlib import Path, PosixPath
-
-from skimage.registration import phase_cross_correlation
-
-from agora.base import ParametersABC, ProcessABC
-from pcore.traps import segment_traps
-from pcore.timelapse import TimelapseOMERO
-from pcore.io.matlab import matObject
-from pcore.traps import (
-    identify_trap_locations,
-    get_trap_timelapse,
-    get_traps_timepoint,
-    centre,
-    get_trap_timelapse_omero,
-)
-from pcore.utils import accumulate, get_store_path
-
-from pcore.io.writer import Writer, load_attributes
-from pcore.io.metadata_parser import parse_logfiles
-
-trap_template_directory = Path(__file__).parent / "trap_templates"
-# TODO do we need multiple templates, one for each setup?
-trap_template = np.array([])  # np.load(trap_template_directory / "trap_prime.npy")
-
-
-def get_tile_shapes(x, tile_size, max_shape):
-    half_size = tile_size // 2
-    xmin = int(x[0] - half_size)
-    ymin = max(0, int(x[1] - half_size))
-    if xmin + tile_size > max_shape[0]:
-        xmin = max_shape[0] - tile_size
-    if ymin + tile_size > max_shape[1]:
-        ymin = max_shape[1] - tile_size
-    return xmin, xmin + tile_size, ymin, ymin + tile_size
-
-
-###################### Dask versions ########################
-class Trap:
-    def __init__(self, centre, parent, size, max_size):
-        self.centre = centre
-        self.parent = parent  # Used to access drifts
-        self.size = size
-        self.half_size = size // 2
-        self.max_size = max_size
-
-    def padding_required(self, tp):
-        """Check if we need to pad the trap image for this time point."""
-        centre = self.at_time(tp)
-        return not (
-            all(centre - self.half_size >= 0)
-            and all(centre + self.half_size <= self.max_size)
-        )
-
-    def at_time(self, tp):
-        """Return trap centre at time tp"""
-        drifts = self.parent.drifts
-        return self.centre - np.sum(drifts[:tp], axis=0)
-
-    def as_tile(self, tp):
-        """Return trap in the OMERO tile format of x, y, w, h
-
-        Also returns the padding necessary for this tile.
-        """
-        x, y = self.at_time(tp)
-        # tile bottom corner
-        x = int(x - self.half_size)
-        y = int(y - self.half_size)
-        return x, y, self.size, self.size
-
-    def as_range(self, tp):
-        """Return trap in a range format, two slice objects that can be used in Arrays"""
-        x, y, w, h = self.as_tile(tp)
-        return slice(x, x + w), slice(y, y + h)
-
-
-class TrapLocations:
-    def __init__(self, initial_location, tile_size, max_size=1200, drifts=[]):
-        self.tile_size = tile_size
-        self.max_size = max_size
-        self.initial_location = initial_location
-        self.traps = [
-            Trap(centre, self, tile_size, max_size) for centre in initial_location
-        ]
-        self.drifts = drifts
-
-    @classmethod
-    def from_source(cls, fpath: str):
-        with h5py.File(fpath, "r") as f:
-            # TODO read tile size from file metadata
-            drifts = f["trap_info/drifts"][()]
-            tlocs = cls(f["trap_info/trap_locations"][()], tile_size=96, drifts=drifts)
-
-        return tlocs
-
-    @property
-    def shape(self):
-        return len(self.traps), len(self.drifts)
-
-    def __len__(self):
-        return len(self.traps)
-
-    def __iter__(self):
-        yield from self.traps
-
-    def padding_required(self, tp):
-        return any([trap.padding_required(tp) for trap in self.traps])
-
-    def to_dict(self, tp):
-        res = dict()
-        if tp == 0:
-            res["trap_locations"] = self.initial_location
-            res["attrs/tile_size"] = self.tile_size
-            res["attrs/max_size"] = self.max_size
-        res["drifts"] = np.expand_dims(self.drifts[tp], axis=0)
-        # res['processed_timepoints'] = tp
-        return res
-
-    @classmethod
-    def read_hdf5(cls, file):
-        with h5py.File(file, "r") as hfile:
-            trap_info = hfile["trap_info"]
-            initial_locations = trap_info["trap_locations"][()]
-            drifts = trap_info["drifts"][()]
-            max_size = trap_info.attrs["max_size"]
-            tile_size = trap_info.attrs["tile_size"]
-        trap_locs = cls(initial_locations, tile_size, max_size=max_size)
-        trap_locs.drifts = drifts
-        return trap_locs
-
-
-class TilerParameters(ParametersABC):
-    def __init__(
-        self, tile_size: int, ref_channel: str, ref_z: int, template_name: str = None
-    ):
-        self.tile_size = tile_size
-        self.ref_channel = ref_channel
-        self.ref_z = ref_z
-        self.template_name = template_name
-
-    @classmethod
-    def from_template(cls, template_name: str, ref_channel: str, ref_z: int):
-        template = np.load(trap_template_directory / template_name)
-        return cls(template.shape[0], ref_channel, ref_z, template_name=template_name)
-
-    @classmethod
-    def default(cls):
-        return cls(96, "Brightfield", 0)
-
-
-class Tiler(ProcessABC):
-    """A dummy TimelapseTiler object fora Dask Demo.
-
-    Does trap finding and image registration."""
-
-    def __init__(
-        self,
-        image,
-        metadata,
-        parameters: TilerParameters,
-    ):
-        super().__init__(parameters)
-        self.image = image
-        self.channels = metadata["channels"]
-        self.ref_channel = self.get_channel_index(parameters.ref_channel)
-
-    @classmethod
-    def from_image(cls, image, parameters: TilerParameters):
-        return cls(image.data, image.metadata, parameters)
-
-    @classmethod
-    def from_hdf5(cls, image, filepath, tile_size=None):
-        trap_locs = TrapLocations.read_hdf5(filepath)
-        metadata = load_attributes(filepath)
-        metadata["channels"] = metadata["channels/channel"].tolist()
-        if tile_size is None:
-            tile_size = trap_locs.tile_size
-        parameters = TilerParameters.default()
-        parameters.tile_size = tile_size
-        tiler = cls(image=image, metadata=metadata, parameters=parameters)
-        tiler.trap_locs = trap_locs
-        return tiler
-
-    @lru_cache(maxsize=2)
-    def get_tc(self, t, c):
-        # Get image
-        full = self.image[t, c].compute()  # FORCE THE CACHE
-        return full
-
-    @property
-    def shape(self):
-        c, t, z, y, x = self.image.shape
-        return (c, t, x, y, z)
-
-    @property
-    def n_processed(self):
-        if not hasattr(self, "_n_processed"):
-            self._n_processed = 0
-        return self._n_processed
-
-    @n_processed.setter
-    def n_processed(self, value):
-        self._n_processed = value
-
-    @property
-    def n_traps(self):
-        return len(self.trap_locs)
-
-    @property
-    def finished(self):
-        return self.n_processed == self.image.shape[0]
-
-    def _initialise_traps(self, tile_size):
-        """Find initial trap positions.
-
-        Removes all those that are too close to the edge so no padding is necessary.
-        """
-        half_tile = tile_size // 2
-        max_size = min(self.image.shape[-2:])
-        initial_image = self.image[
-            0, self.ref_channel, self.ref_z
-        ]  # First time point, first channel, first z-position
-        trap_locs = segment_traps(initial_image, tile_size)
-        trap_locs = [
-            [x, y]
-            for x, y in trap_locs
-            if half_tile < x < max_size - half_tile
-            and half_tile < y < max_size - half_tile
-        ]
-        self.trap_locs = TrapLocations(trap_locs, tile_size)
-
-    def find_drift(self, tp):
-        # TODO check that the drift doesn't move any tiles out of the image, remove them from list if so
-        prev_tp = max(0, tp - 1)
-        drift, error, _ = phase_cross_correlation(
-            self.image[prev_tp, self.ref_channel, self.ref_z],
-            self.image[tp, self.ref_channel, self.ref_z],
-        )
-        self.trap_locs.drifts.append(drift)
-
-    def get_tp_data(self, tp, c):
-        traps = []
-        full = self.get_tc(tp, c)
-        # if self.trap_locs.padding_required(tp):
-        for trap in self.trap_locs:
-            ndtrap = self.ifoob_pad(full, trap.as_range(tp))
-
-            traps.append(ndtrap)
-        return np.stack(traps)
-
-    def get_trap_data(self, trap_id, tp, c):
-        full = self.get_tc(tp, c)
-        trap = self.trap_locs.traps[trap_id]
-        ndtrap = self.ifoob_pad(full, trap.as_range(tp))
-
-        return ndtrap
-
-    @staticmethod
-    def ifoob_pad(full, slices):
-        """
-        Returns the slices padded if it is out of bounds
-
-        Parameters:
-        ----------
-        full: (zstacks, max_size, max_size) ndarray
-        Entire position with zstacks as first axis
-        slices: tuple of two slices
-        Each slice indicates an axis to index
-
-
-        Returns
-        Trap for given slices, padded with median if needed, or np.nan if the padding is too much
-        """
-        max_size = full.shape[-1]
-
-        y, x = [slice(max(0, s.start), min(max_size, s.stop)) for s in slices]
-        trap = full[:, y, x]
-
-        padding = np.array(
-            [(-min(0, s.start), -min(0, max_size - s.stop)) for s in slices]
-        )
-        if padding.any():
-            tile_size = slices[0].stop - slices[0].start
-            if (padding > tile_size / 4).any():
-                trap = np.full((full.shape[0], tile_size, tile_size), np.nan)
-            else:
-
-                trap = np.pad(trap, [[0, 0]] + padding.tolist(), "median")
-
-        return trap
-
-    def run_tp(self, tp):
-        assert tp >= self.n_processed, "Time point already processed"
-        # TODO check contiguity?
-        if self.n_processed == 0:
-            self._initialise_traps(self.tile_size)
-        self.find_drift(tp)  # Get drift
-        # update n_processed
-        self.n_processed += 1
-        # Return result for writer
-        return self.trap_locs.to_dict(tp)
-
-    def run(self, tp):
-        if self.n_processed == 0:
-            self._initialise_traps(self.tile_size)
-        self.find_drift(tp)  # Get drift
-        # update n_processed
-        self.n_processed += 1
-        # Return result for writer
-        return self.trap_locs.to_dict(tp)
-
-    # The next set of functions are necessary for the extraction object
-    def get_traps_timepoint(self, tp, tile_size=None, channels=None, z=None):
-        # FIXME we currently ignore the tile size
-        # FIXME can we ignore z (always given)?
-        res = []
-        for c in channels:
-            val = self.get_tp_data(tp, c)[:, z]  # Only return requested z
-            # positions
-            # Starts at traps, z, y, x
-            # Turn to Trap, C, T, X, Y, Z order
-            val = val.swapaxes(1, 3).swapaxes(1, 2)
-            val = np.expand_dims(val, axis=1)
-            res.append(val)
-        return np.stack(res, axis=1)
-
-    def get_channel_index(self, item):
-        for i, ch in enumerate(self.channels):
-            if item in ch:
-                return i
-
-    def get_position_annotation(self):
-        # TODO required for matlab support
-        return None
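The tiling logic above boils down to: a trap's centre at time tp is its initial centre minus the cumulative drift up to tp, and its tile is the half-size window around that centre (padded when it runs off the image). A minimal sketch of that arithmetic outside the Trap/TrapLocations classes (values are illustrative):

    import numpy as np

    centre = np.array([100, 100])                   # initial trap centre
    drifts = np.array([[1.0, -2.0], [0.5, 0.0]])    # per-time-point image drift
    tile_size = 96
    half = tile_size // 2

    def centre_at(tp):
        return centre - drifts[:tp].sum(axis=0)

    def tile_slices(tp):
        y, x = (centre_at(tp) - half).astype(int)
        return slice(y, y + tile_size), slice(x, x + tile_size)

    tile_slices(2)  # slices to cut this trap out of the full image at time point 2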
diff --git a/pcore/tests/__init__.py b/pcore/tests/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/pcore/tests/test_integration.py b/pcore/tests/test_integration.py
deleted file mode 100644
index 10e45f8a..00000000
--- a/pcore/tests/test_integration.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-Testing the "run" functions in the pipeline elements.
-"""
-import pytest
-pytestmark = pytest.mark.skip(reason='All tests still WIP')
-
-# Todo: data needed: an experiment object
-# Todo: data needed: an sqlite database
-# Todo: data needed: a Shelf storage
-class TestPipeline:
-    def test_experiment(self):
-        pass
-
-    def test_omero_experiment(self):
-        pass
-
-    def test_tiler(self):
-        pass
-
-    def test_baby_client(self):
-        pass
-
-    def test_baby_runner(self):
-        pass
-
-    def test_pipeline(self):
-        pass
-
diff --git a/pcore/tests/test_units.py b/pcore/tests/test_units.py
deleted file mode 100644
index 447ab601..00000000
--- a/pcore/tests/test_units.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import pytest
-pytest.mark.skip("all tests still WIP")
-
-
-from pcore.core import PersistentDict
-
-# Todo: temporary file needed
-class TestPersistentDict:
-    @pytest.fixture(autouse=True, scope='class')
-    def _get_json_file(self, tmp_path):
-        self._filename = tmp_path / 'persistent_dict.json'
-
-    def test_persistent_dict(self):
-        p = PersistentDict(self._filename)
-        p['hello/from/the/other/side'] = "adele"
-        p['hello/how/you/doing'] = 'lionel'
-        # Todo: run checks
-
-
-# Todo: data needed - small experiment
-class TestExperiment:
-    def test_shape(self):
-        pass
-    def test_positions(self):
-        pass
-    def test_channels(self):
-        pass
-    def test_hypercube(self):
-        pass
-
-# Todo: data needed - a dummy OMERO server
-class TestConnection:
-    def test_dataset(self):
-        pass
-    def test_image(self):
-        pass
-
-# Todo data needed - a position
-class TestTimelapse:
-    def test_id(self):
-        pass
-    def test_name(self):
-        pass
-    def test_size_z(self):
-        pass
-    def test_size_c(self):
-        pass
-    def test_size_t(self):
-        pass
-    def test_size_x(self):
-        pass
-    def test_size_y(self):
-        pass
-    def test_channels(self):
-        pass
-    def test_channel_index(self):
-        pass
-
-# Todo: data needed image and template
-class TestTrapUtils:
-    def test_trap_locations(self):
-        pass
-    def test_tile_shape(self):
-        pass
-    def test_get_tile(self):
-        pass
-    def test_centre(self):
-        pass
-
-# Todo: data needed - a functional experiment object
-class TestTiler:
-    def test_n_timepoints(self):
-        pass
-    def test_n_traps(self):
-        pass
-    def test_get_trap_timelapse(self):
-        pass
-    def test_get_trap_timepoints(self):
-        pass
-
-# Todo: data needed - a functional tiler object
-# Todo: running server needed
-class TestBabyClient:
-    def test_get_new_session(self):
-        pass
-    def test_queue_image(self):
-        pass
-    def test_get_segmentation(self):
-        pass
-
-# Todo: data needed - a functional tiler object
-class TestBabyRunner:
-    def test_model_choice(self):
-        pass
-    def test_properties(self):
-        pass
-    def test_segment(self):
-        pass
-
diff --git a/pcore/timelapse.py b/pcore/timelapse.py
deleted file mode 100644
index 2e663cd1..00000000
--- a/pcore/timelapse.py
+++ /dev/null
@@ -1,427 +0,0 @@
-import itertools
-import logging
-
-import h5py
-import numpy as np
-from pathlib import Path
-
-from tqdm import tqdm
-import cv2
-
-from pcore.io.matlab import matObject
-from pcore.utils import Cache, imread, get_store_path
-
-logger = logging.getLogger(__name__)
-
-
-def parse_local_fs(pos_dir, tp=None):
-    """
-    Local file structure:
-    - pos_dir
-        -- exptID_{timepointID}_{ChannelID}_{z_position_id}.png
-
-    :param pos_dirs:
-    :return: Image_mapper
-    """
-    pos_dir = Path(pos_dir)
-
-    img_mapper = dict()
-
-    def channel_idx(img_name):
-        return img_name.stem.split("_")[-2]
-
-    def tp_idx(img_name):
-        return int(img_name.stem.split("_")[-3]) - 1
-
-    def z_idx(img_name):
-        return img_name.stem.split("_")[-1]
-
-    if tp is not None:
-        img_list = [img for img in pos_dir.iterdir() if tp_idx(img) in tp]
-    else:
-        img_list = [img for img in pos_dir.iterdir()]
-
-    for tp, group in itertools.groupby(sorted(img_list, key=tp_idx), key=tp_idx):
-        img_mapper[int(tp)] = {
-            channel: {i: item for i, item in enumerate(sorted(grp, key=z_idx))}
-            for channel, grp in itertools.groupby(
-                sorted(group, key=channel_idx), key=channel_idx
-            )
-        }
-    return img_mapper
-
-
-class Timelapse:
-    """
-    Timelapse class contains the specifics of one position.
-    """
-
-    def __init__(self):
-        self._id = None
-        self._name = None
-        self._channels = []
-        self._size_c = 0
-        self._size_t = 0
-        self._size_x = 0
-        self._size_y = 0
-        self._size_z = 0
-        self.image_cache = None
-        self.annotation = None
-
-    def __repr__(self):
-        return self.name
-
-    def full_mask(self):
-        return np.full(self.shape, False)
-
-    def __getitem__(self, item):
-        cached = self.image_cache[item]
-        # Check if there are missing values, if so reload
-        # TODO only reload missing
-        mask = np.isnan(cached)
-        if np.any(mask):
-            full = self.load_fn(item)
-            shape = self.image_cache[
-                item
-            ].shape  # TODO speed this up by  recognising the shape from the item
-            self.image_cache[item] = np.reshape(full, shape)
-            return full
-        return cached
-
-    def get_hypercube(self):
-        pass
-
-    def load_fn(self, item):
-        """
-        The hypercube is ordered as: C, T, X, Y, Z
-        :param item:
-        :return:
-        """
-
-        def parse_slice(s):
-            step = s.step if s.step is not None else 1
-            if s.start is None and s.stop is None:
-                return None
-            elif s.start is None and s.stop is not None:
-                return range(0, s.stop, step)
-            elif s.start is not None and s.stop is None:
-                return [s.start]
-            else:  # both s.start and s.stop are not None
-                return range(s.start, s.stop, step)
-
-        def parse_subitem(subitem, kw):
-            if isinstance(subitem, (int, float)):
-                res = [int(subitem)]
-            elif isinstance(subitem, list) or isinstance(subitem, tuple):
-                res = list(subitem)
-            elif isinstance(subitem, slice):
-                res = parse_slice(subitem)
-            else:
-                res = subitem
-                # raise ValueError(f"Cannot parse slice {kw}: {subitem}")
-
-            if kw in ["x", "y"]:
-                # Need exactly two values
-                if res is not None:
-                    if len(res) < 2:
-                        # A single value was passed; use it as the start and
-                        # extend to the end of the axis
-                        res = [res[0], self.size_x if kw == "x" else self.size_y]
-                    elif len(res) > 2:
-                        res = [res[0], res[-1] + 1]
-            return res
-
-        if isinstance(item, int):
-            return self.get_hypercube(
-                x=None, y=None, z_positions=None, channels=[item], timepoints=None
-            )
-        elif isinstance(item, slice):
-            return self.get_hypercube(channels=parse_slice(item))
-        keywords = ["channels", "timepoints", "x", "y", "z_positions"]
-        kwargs = dict()
-        for kw, subitem in zip(keywords, item):
-            kwargs[kw] = parse_subitem(subitem, kw)
-        return self.get_hypercube(**kwargs)
-
-    @property
-    def shape(self):
-        return (self.size_c, self.size_t, self.size_x, self.size_y, self.size_z)
-
-    @property
-    def id(self):
-        return self._id
-
-    @property
-    def name(self):
-        return self._name
-
-    @property
-    def size_z(self):
-        return self._size_z
-
-    @property
-    def size_c(self):
-        return self._size_c
-
-    @property
-    def size_t(self):
-        return self._size_t
-
-    @property
-    def size_x(self):
-        return self._size_x
-
-    @property
-    def size_y(self):
-        return self._size_y
-
-    @property
-    def channels(self):
-        return self._channels
-
-    def get_channel_index(self, channel):
-        return self.channels.index(channel)
-
-
-def load_annotation(filepath: Path):
-    try:
-        return matObject(filepath)
-    except Exception as e:
-        raise ValueError(
-            "Could not load annotation file. \n"
-            "Non-MATLAB files are currently unsupported"
-        ) from e
-
-
-class TimelapseOMERO(Timelapse):
-    """
-    Connected to an Image object which handles database I/O.
-    """
-
-    def __init__(self, image, annotation, cache, **kwargs):
-        super(TimelapseOMERO, self).__init__()
-        self.image = image
-        # Pre-load pixels
-        self.pixels = self.image.getPrimaryPixels()
-        self._id = self.image.getId()
-        self._name = self.image.getName()
-        self._size_x = self.image.getSizeX()
-        self._size_y = self.image.getSizeY()
-        self._size_z = self.image.getSizeZ()
-        self._size_c = self.image.getSizeC()
-        self._size_t = self.image.getSizeT()
-        self._channels = self.image.getChannelLabels()
-        # Check whether there are file annotations for this position
-        if annotation is not None:
-            self.annotation = load_annotation(annotation)
-        # Get an HDF5 dataset to use as a cache.
-        compression = kwargs.get("compression", None)
-        self.image_cache = cache.require_dataset(
-            self.name,
-            self.shape,
-            dtype=np.float16,
-            fillvalue=np.nan,
-            compression=compression,
-        )
-
-    def get_hypercube(
-        self, x=None, y=None, z_positions=None, channels=None, timepoints=None
-    ):
-        if x is None and y is None:
-            tile = None  # Get full plane
-        elif x is None:
-            ymin, ymax = y
-            tile = (None, ymin, None, ymax - ymin)
-        elif y is None:
-            xmin, xmax = x
-            tile = (xmin, None, xmax - xmin, None)
-        else:
-            xmin, xmax = x
-            ymin, ymax = y
-            tile = (xmin, ymin, xmax - xmin, ymax - ymin)
-
-        if z_positions is None:
-            z_positions = range(self.size_z)
-        if channels is None:
-            channels = range(self.size_c)
-        if timepoints is None:
-            timepoints = range(self.size_t)
-
-        z_positions = z_positions or [0]
-        channels = channels or [0]
-        timepoints = timepoints or [0]
-
-        zcttile_list = [
-            (z, c, t, tile)
-            for z, c, t in itertools.product(z_positions, channels, timepoints)
-        ]
-        planes = list(self.pixels.getTiles(zcttile_list))
-        order = (
-            len(z_positions),
-            len(channels),
-            len(timepoints),
-            planes[0].shape[-2],
-            planes[0].shape[-1],
-        )
-        result = np.stack([x for x in planes]).reshape(order)
-        # Set to C, T, X, Y, Z order
-        result = np.moveaxis(result, -1, -2)
-        return np.moveaxis(result, 0, -1)
-
-    def cache_set(self, save_dir, timepoints, expt_name, quiet=True):
-        # TODO deprecate when this is default
-        pos_dir = save_dir / self.name
-        if not pos_dir.exists():
-            pos_dir.mkdir()
-        for tp in tqdm(timepoints, desc=self.name):
-            for channel in tqdm(self.channels, disable=quiet):
-                for z_pos in tqdm(range(self.size_z), disable=quiet):
-                    ch_id = self.get_channel_index(channel)
-                    image = self.get_hypercube(
-                        x=None,
-                        y=None,
-                        channels=[ch_id],
-                        z_positions=[z_pos],
-                        timepoints=[tp],
-                    )
-                    im_name = "{}_{:06d}_{}_{:03d}.png".format(
-                        expt_name, tp + 1, channel, z_pos + 1
-                    )
-                    cv2.imwrite(str(pos_dir / im_name), np.squeeze(image))
-        # TODO update positions table to get the number of timepoints?
-        return list(itertools.product([self.name], timepoints))
-
-    def run(self, keys, store, save_dir="./", **kwargs):
-        """
-        Parse file structure and get images for the timepoints in keys.
-        """
-        save_dir = Path(save_dir)
-        if keys is None:
-            # TODO save final metadata
-            return None
-        store = save_dir / store
-        # A position specific store
-        store = store.with_name(self.name + store.name)
-        # Create store if it does not exist
-        if not store.exists():
-            # The first run, add metadata to the store
-            with h5py.File(store, "w") as pos_store:
-                # TODO Add metadata to the store.
-                pass
-        # TODO check how sensible the keys are with what is available
-        #   if some of the keys don't make sense, log a warning and remove
-        #   them so that the next steps of the pipeline make sense
-        return keys
-
-    def clear_cache(self):
-        self.image_cache.clear()
-
-
-class TimelapseLocal(Timelapse):
-    def __init__(
-        self, position, root_dir, finished=True, annotation=None, cache=None, **kwargs
-    ):
-        """
-        Linked to a local directory containing the images for one position
-        in an experiment.
-        Can be a still running experiment or a finished one.
-
-        :param position: Name of the position
-        :param root_dir: Root directory
-        :param finished: Whether the experiment has finished running or the
-        class will be used as part of a pipeline, mostly with calls to `run`
-        """
-        super(TimelapseLocal, self).__init__()
-        self.pos_dir = Path(root_dir) / position
-        assert self.pos_dir.exists()
-        self._id = position
-        self._name = position
-        if finished:
-            self.image_mapper = parse_local_fs(self.pos_dir)
-            self._update_metadata()
-        else:
-            self.image_mapper = dict()
-        self.annotation = None
-        # Check whether there are file annotations for this position
-        if annotation is not None:
-            self.annotation = load_annotation(annotation)
-        compression = kwargs.get("compression", None)
-        self.image_cache = cache.require_dataset(
-            self.name,
-            self.shape,
-            dtype=np.float16,
-            fillvalue=np.nan,
-            compression=compression,
-        )
-
-    def _update_metadata(self):
-        self._size_t = len(self.image_mapper)
-        # Todo: if cy5 is the first channel it causes issues with getting x, y,
-        #   hence the sorted(), but this is not very robust
-        self._channels = sorted(
-            list(set.union(*[set(tp.keys()) for tp in self.image_mapper.values()]))
-        )
-        self._size_c = len(self._channels)
-        # Todo: refactor so we don't rely on there being any images at all
-        self._size_z = max([len(self.image_mapper[0][ch]) for ch in self._channels])
-        single_img = self.get_hypercube(
-            x=None, y=None, z_positions=None, channels=[0], timepoints=[0]
-        )
-        self._size_x = single_img.shape[2]
-        self._size_y = single_img.shape[3]
-
-    def get_hypercube(
-        self, x=None, y=None, z_positions=None, channels=None, timepoints=None
-    ):
-        xmin, xmax = x if x is not None else (None, None)
-        ymin, ymax = y if y is not None else (None, None)
-
-        if z_positions is None:
-            z_positions = range(self.size_z)
-        if channels is None:
-            channels = range(self.size_c)
-        if timepoints is None:
-            timepoints = range(self.size_t)
-
-        def z_pos_getter(z_positions, ch_id, t):
-            default = np.zeros((self.size_x, self.size_y))
-            names = [
-                self.image_mapper[t][self.channels[ch_id]].get(i, None)
-                for i in z_positions
-            ]
-            res = [imread(name) if name is not None else default for name in names]
-            return res
-
-        # nested list of images in C, T, X, Y, Z order
-        ctxyz = []
-        for ch_id in channels:
-            txyz = []
-            for t in timepoints:
-                xyz = z_pos_getter(z_positions, ch_id, t)
-                txyz.append(np.dstack(list(xyz))[xmin:xmax, ymin:ymax])
-            ctxyz.append(np.stack(txyz))
-        return np.stack(ctxyz)
-
-    def clear_cache(self):
-        self.image_cache.clear()
-
-    def run(self, keys, store, save_dir="./", **kwargs):
-        """
-        Parse file structure and get images for the timepoints in keys.
-        """
-        if keys is None:
-            return None
-        elif isinstance(keys, int):
-            keys = [keys]
-        self.image_mapper.update(parse_local_fs(self.pos_dir, tp=keys))
-        self._update_metadata()
-        # Create store if it does not exist
-        store = get_store_path(save_dir, store, self.name)
-        if not store.exists():
-            # On the first run, add metadata to the store
-            with h5py.File(store, "w") as pos_store:
-                # TODO Add metadata to the store.
-                pass
-        # TODO check how sensible the keys are with what is available
-        #   if some of the keys don't make sense, log a warning and remove
-        #   them so that the next steps of the pipeline make sense
-        return keys
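`get_hypercube` above builds a 5-D array by dstack-ing the z-slices of each timepoint into an (X, Y, Z) block, stacking timepoints, and finally stacking channels. A minimal, self-contained sketch of that stacking order, with a synthetic `fake_slice` standing in for `imread` on the mapped filenames:

import numpy as np

size_c, size_t, size_x, size_y, size_z = 2, 3, 4, 5, 6


def fake_slice(c, t, z):
    # Stand-in for imread(name): one (X, Y) plane per channel/timepoint/z-position.
    return np.full((size_x, size_y), c * 100 + t * 10 + z, dtype=np.float16)


ctxyz = []
for c in range(size_c):
    txyz = []
    for t in range(size_t):
        xyz = [fake_slice(c, t, z) for z in range(size_z)]
        txyz.append(np.dstack(xyz))  # (X, Y, Z)
    ctxyz.append(np.stack(txyz))     # (T, X, Y, Z)
hypercube = np.stack(ctxyz)          # (C, T, X, Y, Z)
assert hypercube.shape == (size_c, size_t, size_x, size_y, size_z)
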
diff --git a/pcore/trap_templates/trap_bg_1.npy b/pcore/trap_templates/trap_bg_1.npy
deleted file mode 100644
index ca09a8b6fdbb857de18289059bde8027f55b5a55..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 8578
zcmbW6hj(1nwZ?7GDRZv?I{~k}lotZznSdc730@EaDFoIsixvnakVbnUlxI3g2!>!V
z0b_!3FW`b~%aW{W$+GImsw0icj5Lx)ZPZ82k}bL0IPbSF`3s_1qq+CoUB3P8z0W=O
z+*NnpcgNkoo;~Z8S+C81{D~JIdtv^ME}sA6m%l%M!Nv3cckv4^z3|BY{eJNak3S*f
zw?6XBi%&5A;**a&_XOvcFT8fa#S5?f&Vq|yzxY4=bAl$%WS9yQ;5)!)#<ZGIGj0x<
zv!>e&nWzbx&&^RjXH3M@Fcvg%Gi|C&5!WV=51J$VpEQl8#VA{1`b?J@F;m!UHO;2U
zlp%etokX*OW53BZb*9I3pdG=}e#RP1j@fHsrV{yM*lRXzrkC@RplHHVKhh&+%$xv4
z50?A*4sukBWVb2B_c44P#r_E-$3f9%%88|skt#FGj+?_EttFZWb^`nlnS=PR;V59*
zO*+Th`9ErgIf`O?2CXKJMu@4!6f@ohrUM|EWrsXWW9Uwq6i^m}wU;Q)vlVz8CAt~(
z`oYmd1ocRVz!Aej9}&h(C;zj-Sr4{B#_EY+AH1xG*9jt;!eS-(QcW0+>tSX;Jk0PJ
zFj;u$CdwIpo6HhyHKH9c1^7RKzj4max6L5$AhIyAHNix=S<QJLQcZkn$V)D4?#5RP
z?pv@vV)mFV*a^c@8$OQ^Q7JM-a65>l5n|{B-)Sr#;kv}pK#Y^<mNEV>tPe02Wh}@?
z`0eBW3GX<L%`owdn`&gn7>k2D4lDI&ba{v*3w7{t(BthiSL^srgQ5XD=i3-;ML-<j
ze2{!vTMY}-Cc!pvCKVFIUbacXLIcPBoVAkiDR7IG2)UYu-ytHa18Wy?S0RxBq6x0|
zF<K7F2Be0O8Z^iGHMSeR!`SdMJ_UPH9j*<4tqb&HXpIupXK2*pb3dP9G&-r1FuxOK
zwmsLzJ-gLlNF%!0wgatG*g4k*ISP4f2EbHe@;PrN%Xx5e6ihpj596yDY<}3DAWz-&
z;~}DmnYp$Ky#07<Ch|^>deNzZmk}Zhz`{X}2C#634oTs+ma`5#wjdKG2SNDj1aTQW
zOp)0}u1z5;F-z@4Ksk(!HgKgeqO8<k%v@jxiC8*pHZk;q_8=M|<RW-(gO>(yOfo9f
zK7-C_v_v<ZvsyAX&9{M$I)X+N>wV;)hfgn~2VpM?uLIx=bG;btHn7f6!RLc_oD2xs
z8DfdxC5o?ntoG0=dpVMx8^F?Wcxr^3c8+6k8iUDtw5srM0DecX89{0Q?@{z8sn=qT
zyRbdRSp$53P6U!a>FXHsg;=bDwWyc3W;hr516<$F)fQ?Ygw0;GhKNEu3kQe5?8jrC
zXI1+22%g$ZCbH*H!C6$A<l`h?;k}YbPvdJ0nK`zIkrJ%bll>&@5A*HA?g;o!p)-n<
z%&tS^p#nrbUiPLyRR^9%@Eym`6j{#XBV3Qe|1{%c*qTJU&#Sjn=tYr=;3<IZ7<wZ}
z#K~F-u}vTwCdwu}9zkaoxo9Hpc4C`Arjy)PaweTviUpZ5Wu6{pJcNXd_oLg%@dRTD
zwwx%t@o@%U?PO{i`6gza6IiW<yCQs7QQH~V*a*gx$W0?B3`UU}CxR;c6qsZ@R-jSE
zy+r1?0_KtdIF6&)4p+H&>>;)&S5AVWjVmX}a058wjFw>8&ruEcy|dKFar0$6jb0%U
zpJUg6HWmA|*h;iHu-<^qAd;2D*h*yiuo=dFJ2hE?+&H>N865{xBN6sssh*l2g|`9Z
zB#Xz$#k5y<!)O&@O{^YaHk|;U%mg8?(_6h5>OpV-41>tlg0GrvMDRDt*$JW=0zo^`
zWFdE&I2!R6p&z7%hQU#2e6|OB2dRV+*cxE02rE*va<2-aUqUv<i7r5H`@s^2%OS>E
ziK5r5g+aV_pex-Xb(aE{<3t)}><oydPbR=1ec;D-oB0rXa!2n5Q2;cpM5&m)hZ$?f
zLp4__u+fXJ5LswJT54hhoBn^VP2%)nl)4#$y{#U?CM0KDKWw!yp6AtGGZ==vj5hPX
z6)XFQx`)wp#*6Tn4Ymm1I5;Bk5k^YpiZr;df!R_R4v<r+*=8(<sHb|aj&UsWPl#xf
zi9^;eZFJ))=I{t(T|^+WYaOT~{BJ-;=87CNx~b(>khGHZGRDiWd<gv!uVTVj?nS2z
zze8plvQj-Vo0U^7Rb;Z6*rXd%ks4%`AhT>6vNF$%Vy^~Hs=$2=Nm-$VL0gHHFwu4}
zmIK-}7?S$y$3hi%l?f`QnwlxX_ZVYQtWUyH5;FUcDDgU`mo-fbBehgaIhp7(8<45x
zzuf=JkPBd?f@pKl>Y&^FoR#58W{@6m=OR(ZuS6hYhwu<Zw!rLSMDn+v_=mBUPtEKD
zgLGLX_S&&78I*hY1Wc5IAPXPeNan+BJ>Jr}@1}C)J^W<jJH@Oc&L+O&NTeXqMg7a1
zBwbdH)ozc?0S~9FogyCA8uKZ~DO|6^(iZGXPwz52h@lScR-!E7xB$+Z@f-uwW^iYt
zy~ezcbT+&PxSEY$Sx;{P*<LK}MMv(@`P5hpZ*giUOm9VyD~G8Lz8lRy%m?Om>T0|B
z#4I;!%{x@q8?dq4^Ux1J2g&qNvM!x5N&Z6M3W9e($p6BxcwEQ-56N@@uYU6`Yra)r
z34x##PU6UhLAe(cnb_;XYA)Dvv9Q-{LUs$$RO53S9kvZ5FY-x&^K#<r0!<fZa_>8i
zwXH;7!g)6Sb`$Gz^PIWYoNHowSfA8)oBOf3nMk&gpB+?s8=4d3Bab|E!ekg$E3qRT
zu^fCA)b(RnxWXu`EFdPy=~Mjwkc!F1Mn93qsM;~mR8vEx)NUO6uff$eu3u)B>KfIm
zYE_~7TwSG~)mOqm2Hy=xtb<)YjMal$cpZknJzOm$dn-ZkDg1xc_;sSLQd`t+HL9-H
zN%|JE6zwJEEo3{r%r=q929Q-?r3ov3a<zked~Akvhx&<nT}`U{^*wr)E>-JPpK8=U
zAcA$~W$YxuRRruKc-=)N_Q1`4bU&bmip^c7OJAbzQR4{<)Ca0g<)}1OtiG>(<`rro
z2@4;?*cQB26L*l20`GqpOvKDT&4}KmeyV!adFJc3%;wmI_FCi9_p8lnvA&K9dxs3~
z0{y2jT}AY=$}a@}Myf$7|4Gvbf1?S536)CIeK*?)^J8<7-lt;fs7{8XchHv=dM&w-
z9v-BdPmr56WOLBmZqBF&)a5$Ce&QQ+7Pv3FTb;GO0dtf7p<1fm&<o8SW`$WyT~xqZ
z4ONv4b8EqqLN(Qyw@r)AS49cmP?>tE{jqb_dCk4lz23RWZqk+N2EE8UW8Nf&eDW@9
z`#ff@YNBYxQiNGD-Sp_EmDc^{NvF`Ali2C@xsC3d?qkkV_6GfRm9Ada4-n-C<TC`{
z&G_C+Ep3Czo#dgw#C4Whm2j!L+AQ;3<xacLB_=1XO#EYFr}KdQF4*$)efWQtZzq|P
zc~+jg>WHS4-Wb914SKmcqqq6CxhApQ`KdF<Np-fm`R-chX?wf=2^d29PV+msTn+QP
zynYMP^?t16n}^ILdUwJsb%QDMZFE)QAKc5`DQAiMV&aby`+cvPCS~*-s$dD|x0AzC
zvQ&ZRUFf&bkF)F#jM9^8o((!zCLVDv^gU-EwthR;`G&jJeapGXo=|^O=jjAuSO?#K
zBl1ivMXAjVu=<wyvp$+IoiM8XzMwnoRy(iyT5X+O>ifETdEyV<-`UG_R1NFDQ*H0^
z*@3p8NWsEkutcbb>&;<xg<fuNb9cCnzU!>qIj*tK*+SoK?qc_}?<KQRy{q!|!_>lC
zu%1u<7h*-aAz)7ErK&Aqnf`_E9(S|*s54=I%^K{OdD4E{sdksU3w;mjw?XheUB89w
z719ZDvX@R3%H3!R1iv(n{;rPLU%0<?U-o_5p5z|(6?=hw)6Q}J=-%jl(^se;RA18b
zh<^#$UkwY5u&{x?UTR*_=g_w|*>)$%UF@8;7uwl2f&GJdHtIVkvB!PQ_kw;|?bJ^)
zKc>Lhdt@L&Ps&qz0hJMDmj11|N8g}-W(S>Q_sh;=dzN0h4NKp!&33!9#{HcWHtXr_
zF5<`}6RW93xi`rZ;H{=t-Kbi1HyIdm{^0u-_`he*x3la^_SeoKH_N@!cT7F53ib2U
zZ#p`kfxU%}D5o1_1sk9*eq(m(Ir=lx3y*I*$MHR6eg=la=I3_Q*W!NLP4Hc!*Qo9K
zFL3Yy4AjHoF79eUGtZQ&HEPWG+)DRm=OH`SUTx=DgN^fibKOOW7rUe8ugoeJP(hpM
zgWb#^`OK=F)Ig9N$P-=zcY~<@AH7Ux+YQc;bJVxWj**A2+5%>ZO}-88D);xkMf!D>
zN;Pf3P8qXOqc_8SZswah^()<FE8R9X+4oJJwa;TMdI7WA-A=ySKvpl&*QyJd8Q-Uh
z-h=fJEFK^Wn_+SvNR~6U(oE}D^^iKH9bc-`<M@5K_C|XQtT)?(zG^qcebDzE{RUib
zB$5=eu$~!dHI)=G-!fC`DXn}N?jg6?cZe*FnE7_pp7Gt}_PXadpX;ks66`(C?DZnd
zRnWh(4wDsQkmzgRBi;Pj+-Oqu2{of`F|YUn&Pm7T%-AJ%vHgi}mU}K8m220lQuP(?
z3){U47{}H|=GVQ<1&`>z>k50X8)fzwvp3rn_BP*_oa^1~?teLVm@Cv>>Qa3*J@gDw
zzv<oO3$QXwP0I6hh~H1CkQL@4vqcZ9)9QY+!*|Mg+D&sVb8dC!xN*0H3jeLSUd_?J
z=kAhAMOQJi$jaqY=C+Thq3`LSxx#td{geBy<2u(kdz@wNJMPz<>&)$Ht~#n>`Yt@Y
z1nO+!&F5PJ_84<=F+8NxrP*+MpSg;@il_wrsCm=(FK3mT#O%=KR=H!&KkRY!XVsxs
zQUfc<LN>YUrjyc$A(K1u%{rpD+NYdF?myhWqSfI(;HLRb>qRO-<xxFXuts<V%}nyX
zhYD!(Ry^|L8>gm{z_Af#OS!vWXr9qc>Xho${q}sP5+*~=Lgzs{SHG`*%Y3xc{D~gS
z<{q2Rypq9PtqVTxH2d|hbcfEgS31X>&F&lSdrq2Ns{dOl)lL`w!2FyJ`Uuvxv6gF~
zgZI&kvcAdiI=USMvfGwTZ9QQwHGkD-xt}i5$Mj)lt7?;_OVlRyu>J+tKc<`3(1pT9
zF{qQVT0>PXG@JC@`USnrJZJ0eb@m$5p{`;kpQoGjjoi_mHOr9N#LVNT3U@LC<WU0y
z%$u_3CA%>>T-ix&zh@pH7v=gsU8fpUNQISOrK)L_p&z4<-)BZI;PW<_>qbY`qsz$0
zdT{*4RO%a99er6JR+;J#>Iv1P*6It)tLAAw&vOUgiMIwg>&1VRqZoB8Pc(xtl1`){
z5awd<HRNwKUo&A{q?a&5Y}Jjd7vH6Gl9-7;qW03s*>>tEm-|dEOl%>uDdg}^=BFlK
zKM5BP=||C=Yi?nlc^ZqapecHz)Ir3X30ld3>|r(#y~Hxe{iz0P0c@?o;!@D8By+E@
zGFk`c#pEl`yCduY?+Vb!N<JNpEas<;XrvNhG1sz)^(lJgKJ;ZhAvTg>Aj~XKLf1t>
zB70S<kdA@+0FhU~Y!#mS(JAM61+f--7;;!0mLnYic{(GjiR2S+ohZAS@;ny?LnZyW
zn%^yWN&-y{J(^AgpD@SV%a!Gf=V74)+u87*M>b_&ID>V*JW0zFjC6;edA^ohWO{OI
zuoZ=yJdmzpe2^7-J+@M*gnW;;I-*TRzm~Y`z*tWOrGUJc(SI`Q{0+Ti_*}^xl#G4h
zYabHhAd#mK*-w?-{YqBtvd>=N#VXH!rx@Ew92IEnBHQWUl^w&qbU_p7<&KaJciVWj
z%|)*XOR|4ji^g`)HxtilFf1XKWLU4pUJk4TK%Rn30~m@plYQ`JSZIN(X2t`Img8#{
z&$R=1mZ#M!GE_zmwt7sJvMT9EV=uKkL#!QGmR++D9Llp$4fmI9kjXRGI^y5#t)1%e
zx{;^MGOm0A)^b>FK}PmNdprcPTFJpn4kJ-^lqRqg!|FkJABNc)@MOVJF7YJ6KnmGt
z;apak=h_;u$S#u9_aW+_fNLduit(4raSO4_zJ4irlJO*trSIB_rkWV4`N&>fJ^aXC
zP%GBEKqfn8vNtNv)Xm;1G>hm`h+XzR!dR_i9+CZ2*<F;K>T*!Dab6CW`-#4qoV4Ss
z5iCJ|E76c|E=a?Csu=C}G;^>ayId0>iGiVyNc>brFZP4Pu@5QP-x)-|6WcYgnc?N<
z2tMRFwiEqvu1baMqDw-cE@7-4f9r{-kl$X8WKXLNE^3HH)~Ld6KQ_u?z6S(7ur7D&
zIGAV9nZkcFQHQ8WSyjuPNG&xaYtkv?CGuu8PVy^#G)$KiBU6rU6(2vy<XKwcYvV|E
zNqVpoB#M}q1&L60XA8MnK*t<FS9Vb=v3StKFT1d^OVCMNvX)Ebcnpg{Mx<h7kERyx
z>lu?=))3i2*hwSqWUR=$rvrT3yxN!j;n`G89`OXx48X4JyA2R?AF*UII}U)LnWy;~
zs!`sd9fGYH?jo|!EIHl7ZzDBXMn#Wstra}7yIH~ivq*O^)5*R>5$9Ww4dLq~6|o=L
zVwkA~Pk;<|6Lo}0TN#(PFS1_sGZJGKI188k=zqm#!dNZ#!rn+P-lfu_oY&w#;?=_-
zJI1n8FYg-rh*X|LvyhiPvQq4qQVlg+muKQ(t`@?~2=&{GtyCiYVn0!4uOJp==e~(=
zoLZ7yWZ6rOFy78lGrF={R19W$Zy<M%GS1_C8)2>v46+wh0*WDY>yg{U?<Z7dH#rV)
zWfyVEtSb2%AnPI6mA$Gx*pIL_%LP{ne5bL#^?NncfK_=<D3w)%>|u2C@E|+Be)y^9
zc{_x*WVRgU<=tzD$Yh5;6^T^NTKSdtma=!->DiLGQ+CYDz)%GHxy;uG$VZUzQMhYm
zt{Wk$4$n?KbEE9|_97#1o+hZa2J#fg!Wlj-$e*DrhB=yHANMS=${SDFiOwJ!-T05d
zg6xFK8^>}g<&0;e4-fT>H=sMoT-kuU%ms(BEwjyW;*4-@AD(hRRssXETQ6@F`jPE~
zP1!SUL0)$2<Jk7oGci1M;5kb5e9YByvNqyzJVi$2eLxTRM5~7=58$Jpo|GN>{jgsL
z;&JT8_#FhF>@UiD;4j%aA{s=amETM%<>bF@946OYoJYxU7S&Y+PY2ka7E5X5t&l1h
z2BDv<%bS}fqLm%qFYaKnFD-9YYVjrSm1I9pdUuGt?8ZZw@d)=ld1KIrY`4c<zt=;u
z*C21rzj%8Q2X`?ZC#h$7|2plBr@&trX?ZVRi}Wz>kDAG;?10K!`vDLhz|UFOlD8rf
zNgp;xsF4arE@E#|-YrQD$Xf~NilgLs3|1;V{^ZS{ygj{$r?LoAVdF4=v@?1JywWE*
z_?Hz_nKzpp!(S(PlO42PBpP9*h~qEjYq_gsfGR+y+ZYdlrwvb)c%EnTiAi{uj+A|~
zHN-3LX(fU~e6ol@?m(SHD|5>v*97Gx|GTg@&Uuj+@ga12>5vRG{fx!Qv&^fN=Kle+
CK!-s9

diff --git a/pcore/trap_templates/trap_bm_1.npy b/pcore/trap_templates/trap_bm_1.npy
deleted file mode 100644
index babded4ab4a877c9a7ecf67d400d8d9aec0bea4f..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 8578
zcmbW6iIZe^UBJWpb|D5!pi~q^YPBkEup%Xv!7{{BQ<PO&905cKM3QAQyV;$yr+cR7
z>SMZlrhB@lk2!XZoh;eiWD_uC2_Xg~8VQG@LI_Go$fd?20dq(+LP#o<pU><31y0v{
z^WN|HUcc}CHs1NZcU*ee%g&iTXMX=xL%D;K`!CwJ|Kh3N+JC{m{fEXU^OFb0t{9)Z
zYRK_FIxvzOVmx>Cfr%mB&wtB>7wmh>g>SrIUvc06^Y7edY{Yz&q8|&<h$B4bW0j|3
zOvO^n#c~{t$>>El+T7=(9Q7Dybc)fbSd0<&ZO3LDV`ht;t1-=r#i%jgWJfvOGs;^j
zKFat8d)K1M`}OSXv2HpltgHc{&i|!o0e2f%4c^yTJ;(nxZ_CVfS-Zqs6-dL}ccQ@T
z3Rr4DUSrL1C>UpTKSmi@Vvi6SaM6S_`Kq&emwSzU6=+xn>I^G;F$NwXml$dBoMS~1
z8fx5!filOc4wo`0^R~^3GtfUB*8pb(PWxOFthko<dYpi|5>(9ta|y0Sct62=hu<zZ
zX1O<*kq7Z9g9+xQ8S8?3E2X8xQxzCBW@doWWWJT2+8(smxE6sT-Pt(FOa-{P6w7q%
z@l=De7W=F0J<ZjDCeLO8&b((?-+_Y}o|l=c#??r%9M=J<o$}EF$_x~#CyUH)K=CXX
zX4AJxFjV0{sVyO?HRPP*{Um!@{BQG=gXTKtFTN>8mD3`lN@s$7MR=S6Uxkq#*t_7L
zM7|U3ork*~`*WOjm(x`bTI}{*uVhsg{GQA*T*$33%3O1-Y4Ynp!6K5DyDBsvOZ8M3
z3tTP6*5IeXzAZ-7#SQd%7MvyCmYFHDzsyq>S_)9wO3!#K<*ABhjWSnacagnQ@Y#UQ
z27A`I=UG(*&OBE>o$GNI?<ANj@fXOV0apVls>xcDdcFovb^bTmzX+5z63V9ZcENWn
zeNuOpQ|xm{st0Xj>?!lhvQxd+Vk>aJ1`KVu31@1|ENi>SW(wR#cwgb_uyP$4EF$S%
zildgUmY-$t&7>5o<!ad&(3Q0ky(7L5_r%}E+4wYia}0_{;I<8JeT25P#?x+!zr#qG
z5xLRsXTdnmy~(&#sk?fb3UX+JSsv7+W#p`ejIw?iU6QJ;l*&8eui{%!o{K*N*LIwO
z9;I@GbDB?O)&!PT(gkt_93@~Bk?8`s))`siH^-<RrOPjecTyj_Sn?G5&|`curDO$9
zunYZduw;=zo}IVEy+D6`yqLT^emdS3e-Q5n?*{7|KpF+|B$)cFX>&iuCFR1)!B;7T
zJJ7+S;G97c8^~`9I$Nx+rku2)U<GMu7xRovr4p+Gw-PtUBXMc`Z}NQd8_e&;=g_Jh
zxSK~74K$=3AK*<H9)y!TBLyfg0%01tl$g<$)y6Dqr_$4DF|Sn0spLKXRj_XXQ|#)W
z=U9j#s68FuX8+^K1IgQw+k^4%@sIK4xIg|0OU|=?5m*!4)tCiF>fEzjqwqfh{vGJ_
zbkvJ=q}4*k?x=G&lBo3)tkM&#@vB0$tEHrv%H*E7J^m|sFu552`ZTBUY3REKP5e@P
z5!kEn(FEHN^iBYO47~E%0JoSHQ_SVm>s6VrA%hxp8VTm1Q%h6_r-9Ois#Qje1D#YR
zQoS2rW%glCpc2pV`%C0F6o>Gnr{kvhS8P)om}2B|&f+5}^{dG2N_eS3S^lM`BC8s#
zlS<E4Pf$wbpch($>Izs#Qp(%ls6g`^m^#S)@8hEQ=j0dT8}VX1A2*-}#we|9E<VFq
z+=xs(&7<+&baVwcGh795Y7ZNnt{#1oOK+1$GDb0@vN|*mm%?0T*AeIvu2wmV6sDoz
zZ?OM2#be3a;_i4&=25UsFrNk5J6T_Vg3qS<*u~nXI0H3KZ7@cxGGFAk4*U*u9A|8g
zy&H@fmyEb|MpuAfcF+ZSHKk>a^-bnRS$R5cjbDprlQ+fJV_)VO=<OkQd1<5^--L|r
zhW|R0j{)^^Bxg?GNv|SHBWsQOF4X7~R*<-HZ9UZ?y<Llai@bHfpJi5mrN_|6DZ?{y
zS6mVgCg-7#FV8%QKimPcQZl+9g36C$E5=8&4J~w*6Uae>@%<Pu)b|?iT{N@9KF@Fh
z3Ti+Y_&fJ>*{kH1kivla5_^@@cHGGc-k&^|6ymY?nao4zqIsC{Vk!=>`glA6yvu-W
z{^QAXfl>uuFFmaaUZTwCJUbe!ISQ6lsE`8vviU=war2TDAUSFdSzu-g2z~s|$#@+!
z{4pBwW90Av{&^2xjKx>5;%qz&zDuysCN$4Mg&NoAl3OzbW4GsF)>ng$Y2M8Recyl=
zX<JS`VIB!@u-2TRge1*9TWG=!ynQEmD7g|(^b?sEIr+2r$quV>+`kylLhqG$3nR8v
z8!h#4hoHy^SpsS^#iP|5J#$dkgGyzx!K$s)8@Acy{5B(5Ahmfmj`WENw_%ZwK*J$u
z_;I`tkK>tcgzKA_d3EOgcsm++19+#|G0A-#EOS6m-b(f;xQwAi{+EG20_U5o>oV7b
z?=~ZoaI9w3!PtSWEq1M?a`Jrcgy;9+mwu33%F{WS7w`<<=Tz=N2hPnb$D6^m0!4*X
zCyVg7z_rYnIhDC-4ZYCAEr7oUU8~5<sH9CTL5F9z1nxTbE|Ao!Y4~p;E9KJT?9awK
z;+f=$<U;2EExya|>G%&Q>BS4lTZ!aW6cuRj{Hp9WLYvPGG9{zH4r88*S%DtYxNI)i
zfr=g@o=KVa3Q^fC$H;5upcgZbz8NeZivLNznIz;0mG~GL!aN%IpUFk&$__kMkfqV}
z2<z*>++wr<*Luuta5uqHMazr?j$0X>;%Xv`6Ud+hcJsYHXRyHA8sl@kDMe%3R(ulg
zRERgk_mW4G$FR>IaGevsk2c)Qyf&vT7~M-iJ<6_AjGOsR!i7FTO)EpiC?2ZJnA%u{
zqCIBJ68emp;cYX!3N+=l$5@^jsa!w`XNeTGcu$-MC2z$7Ht`SFA>pgzDt7ea&)K6#
zREkxo)+5z<s{pgY(;}-@kcT<7+?kKGIX|PcG0Xg_%gppkG0Ylnf!R6~m~Z5HI|I&7
z!uRbjJ)OWS)|oXYSYn?w<zYD3gqAY<wDdlFSs4|<GRNJTr4Bwdt^-zMq1e?@ZP~r%
z<J+8>5ym_-&*;EcPw-SiHn*}ueXFuh4Y#)G!I`JM$_i^uJ-2qS#5$vbHH0~Zk>VrZ
z=<#d@px-u|I|4mcWuDY7YdSocX%~P!&Ds^_mf+EPtpi_1baNT=3!`wEYaObqz?Ktz
zk)E~#^bYTnK=QZCv({%Eu*&wlR)Dw8oHe$#Vf2}UUhT>HemIq5fi)}XsTqN;ArhId
zFQQl0b)|H_Cv0`vWq$*T^b=ihSvU3JQ7g1YTHt)Nz#0^bbx^AtAI-V;Qt9ct=a@79
z)stIElv7GtjBc>UtmESN9Ws+VGoGN*Ux2!CXdC4JW@&nQ<9-$`(OVo~%t}RD*Je9V
zX{6fZ{~n`ih2F$G;BYD>ZQS~+#GLu}ESQVDm65`G@l)mmv#hXY*5mY3DHth>oVuP$
z?_7l=eS>~^1qfo=#a=hrXKkR2%$l?VGkc?z^@KeQeOv(=t&<%qB2B9ZJ>V7bZt}w;
z?CPa_j05c;)M@!+te$~iD|zkF>}&+c2Y_v!q<wAzr^b4vueY|-V2x(ZEG6dnRv%{0
zQni%kO|$V~{EhZdfVv#iSa)WDo`;G8kgb*4_=pC37Wge9YqMv)elC^CE>C*u9+H`2
z%q&5T8TjHH($Q;Zk<zsZB~3=I;2g8;)Sox7##3afXLwg1^|?dL8y)9>+J}C1&bq|j
z%?fz*+t)##mU;tMoi$c&`jQhsw5!l(^$zqbLbo-Pr!vOnDIQ{Hla(KfPf;_ShRW^u
zJF=Iv$Y&(ggkfYg3_n`=5^%M(3MW1WBq_AJ(gnf<PsRpgsHeZdh*fF}e0A1u@@B=M
z23*74n7Rml;hYBhX8d9NN_;<gUA!^=1c_dUG(4jme!0&1=x^FUUtotiEasEoS%w;Q
z$BxAe>z7$=er$zgo+Qm$qZy=jW&}5XRc7jsvGpX>zB#^1RN9a3sd4X$-v#fr<S=F8
zW{%wj_S$i2p%Z2r>+H8DWbbE(5xsymqlYqgm`^>YCz4}l7YtIi3Qm1dJ(Y#oUY`B8
zaQ$R_K6xcv-5uYHOy*%K$Ytby8L+i^ebO3xN5P;b>LVxMP`lU#hCPTq#+qPLckQ=G
zowb2kq?&5I;ZA!T`qTz=s_kk=o2udQ<UR3u`UsE2SEy9(B15<j36#*X8^Nm1>v;yA
zK?~F)YyG>7TN%`$W1e?2kTTdjV>>ct4^~Zf?@FAWtF0D_P(04sHF_s|@g&vj7`5Ad
zWJaGuo~<-)-9|P2aZah1YI%d*`gwDKS>$V1P<h+avtp_<W^JII8;guW2cg3JQK{(l
z)C@bZ>gWvPc3kg*!=H-#k}KkY_#qjZ`MXijjBFKoJc-Old9pUPUi2h95pBwhv(4Rl
zMGqs#YPnXs4puv<^LP&JtOz|;LsoBgGuM!d`OYomsh7l;lXvm|Y4BSuoJ4MR3x>Gv
zqNC7>4+47^{nVc1XwZ!_^Q@((BgIBqqk(f)3q}jQ_6exB*K2&QaH?mZ*`A|Wj4|_0
z_B@un8asT3-|dXr#oFRpAjZ$r%XpIYSM&Z+C^oN@j}B*J&uRvl+TS%w*grYWIW>@&
zdTs~B?AZ=n9ZlOrhWd;qD~&;VzT^0wZ^m!Omy!?0KSX5ig?r<oHIp5^B9uG@U6*m1
zU2?V|VCUIw%(hB0bF^bH3AP>>tRan-#(_bv(P&|YY>YI&wR*F+X(wd|S_^3WXW~`y
zRMOzQe;EG){_F8mR>0bR0}uIgnQz5~U^iaceIJGjGY+-H%3WW33^}WZMP#$XxUpsp
zE{*>Bn<my_1~ks|Djb>_tA$1rv%yS!H#vd@{8C0Qyvu(5jC$~icv<E-*6)Y6Wn!M4
zd^_}Z+pYf>c(X^GWp;21>#3}*PL0KCqCHG~)GE(v@g^LaujrqR*>_MGC0OD?{J`t+
zL7#x4S$_6gZlQwv^~`N?0riyMNDgs*h`ZewBjPwe`;GbuBb@qW)@~MVJXPwaz`2UI
zS9`1x%}%UN&M<D*aSFU&qlfY+9gOSawV79B?m{z^=mz=nkK$J|e@%|G4|qq22*W@h
zgL3PGCE~XczJR2)X?>cpNUx`#FkaY^JOxf=Vbwavdl$>slUZdHfn!X0JpMH9r7~%t
zlS$?^nS0><Ry@<IGQXVpdi*AF>cj9kiX8khw9QVvn7yPy7N(zV!kJ$Pv_Ug$^T-|W
zt|D=(LSvbF&<EcZ_bT!za5tmBj$YdFcx`+o`B~n+5f8<GL(@~}-?^DHM91^+1bJk2
zIW!DI!7O~pK@IEBrb<v}&7$uzW6Gu+nyH=OwDb_x@Ma)6Xfc!VJJc@g?NZxM-QevD
zz<oCP4m2F*44xn=y)ttjYu*3_+TWFoU!I<<p50iaUh4JC3uieIy`A~H8F&$FgGg<^
zM4#lj*vo1IOZ{xYgZ@i9Y9h@lHPWY$$@9s#lMKE=eD9B6h@Yo+)7FQG&%<zOBpu<t
z1AhBy`rcvm;xJJ4{0mT5<xL5jX&U>jT=hw2LuQhm&NlCUa}b7>Yz29eNbp(KpN~~M
zPv!ih<ox(OIJ4@#n$>nq4)JVUQX?Aa={7h$<!Jt7th0-4?6=!*HsrI`yN<U|XUrgt
z8)CGxq2E8j2^8VEz&Txu?tcxxYqYViU>%eN_TWpUGEi?iXki5zWWhHAJ=S^Jf?p*&
zNZY!`y30(!Zl)Q(Rh+qkUd-y<j;=N29Ga~(tf!lBR^YDZydL;xSZgn|z#6}!m>Wx%
zo?Sn+&K~>K#>8HFQhnfP6Xw<G!yxzHN@;EYF%MR8dve;WeTEv<u_raK5VMCS6#Mm0
zKbi%Wb@Ct%+Bx;>g>&|7tq{yx)VBg_^H@|1nas0Z-(-DdeAeDd{I_=W>!DHE=PtPP
zrh3Fh{DT#L2e@5qP48(eU*g%^^av~M|8zNXzY4GL-o!uH&2+U=Hup0gnWfrKUWQwJ
z-x}Id<JsE%1azz*!B#58KD!3lg8GwXg_>%APTRCUXtguJ-LKTjR87%`TkqJ5R2qI$
zF#c?&{A9stZ8`8huHA$$zZ<HZ$J0IA?AZZ}Yw~EIQC(@7`I<LoiO6P}>cnAq^Si8b
z6?oUy%)$IVsebGKv@Ub+<Lt;X?^jy$9c63&VCP{TT(0&Tbf0mn46VUR#wyrTFb6Hd
zq5Y&C#`UkBc^Qq=16ciex@K2{FXBe_!$``=YmM#yO?D3hafa1?<MoU0AQqm4FDryq
zxHd~H0@3cCKfl(VV7>O`*CM6k*Gnx%$_ElMn(0@qPSj^Hufjzis^z}On_s05f@PB3
zemOBJ86B*qtPbpU*z=GVcP(=oVj0+lwk9-t2(I$1#MgpPJ4-$fF>8lZ4`G&NwK~ST
U9S6G%W)j+mS=lVu+T45be;O+8WdHyG

diff --git a/pcore/trap_templates/trap_bm_2.npy b/pcore/trap_templates/trap_bm_2.npy
deleted file mode 100644
index 28f399c3062429d2de63c5e9bcd1c42ded66171a..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 8708
zcmbW6hkq2+*Txf4D7%|2bIz3A4TOXwgc4c^y>}^sR6#_Vpn~`VMUkQ+*sy`9D2noX
zX(}j1MM1hWL3;01dP0YQB=2{4|AUwPESa5|d+#~VdCqgro%y6g*Y+JBkBu1_Gdgp?
zz!Ck2XVy*0tp8H=%t|SlPY)aZ;_$xD_Zc>Pz(DtW%f3TK4CMTX!F^vC$nWyGH7lj$
z=4Mq&8JqI|{8y@>rs{S{(J!UB?&EJUog?XbMDEHdxgqCdnVgh1x<IbUk8)i6dQEoA
zAt|M%`qis({9j!=Xc@gDXQY=Nmh}?Qiu`u(sI0psUkc?{Nzkp5q(1#ciX>ZG>jSa4
z>yF%)um<!gZLW94r++f<Dlu52NM^8BDRndltES6`qFO^6Ym)vWu^OwZWu2UsKScC^
zY?13yfjI_fS?#P5%~X$uInI|TBbGDQ8i~_ovO!8?YYQ#U%GumgRvYpAy8I&<Sk_x#
z<F|?bH|1|xB^Ttp?8SP~25NIxyq=Nzd^>0zJtkY_gycz@ChHElEJv}Vq#niMW6~0<
zi)orZhOOz~kO)f4Wsh{$EJpXx1XX=e3#1rpC2A#YiQT`l&I9fkpmQ~a^Ci{No${9)
z<ox#%<gTr(wH^Gg;dPwW*9<MErPcM!&#XRXX#i5yRCJ>(#`4BmMQejZo}86S@~=d|
zWrH+O14RD^yMlNe*5bOKa~GrrSe0N-1AE`njv(ED*a>HHz^g6y-;^uho`v;s#5lw%
zw-^<V&*#O)j)wXy>!o4GGx|0&H`KcP5}46eZ%BEK60@B$LAw+Avdo{0Z85~4K4{!#
zbWnfBf-SNc2K*%>^hd6iB1)aKgjU6>*6_^Ys2t<(;L%Mfu9r9p!PYeV4(U+YVB|;_
z=`EkgaQRM#a_?ZRr%$q07pxhCM~`!718vGYnc5x%YH0<KZG!j7_!+DBh{7!K;_XhE
zsV{3k9f}7zAkv(x+1iTFtBlTK+*~4_tShnL0;3DXD{mO><PEKB{a}UcYF2NvhmOae
zPK=$b=cP5gb9sIN{*NTe2{6RM4i8^1T-nBaX%E?P4Bv{A13_J-BN){~n}O3o7*`H_
zv$^(+_R%IprwRVtfW2AFyGh<OrWluXwB0WJM%Wvk?2L4ta+cWNT3vM_IaN=;)%W=f
zVWnhN8bY+6#Di3hI%-d_$Rj83$W<`dNiM%5gXK%I>;?T0B;Npq2=~^~M|F?X(yriA
z6aLl))lHJixH61?*VtejGOLH{I$vmR=*Pg5fxm+d<*50J-P9gqZkE@iruNt8neUEd
z!t>|!CAf7TMR^P#Ek0)@6;=gU>o&UcEuSFy|0=m!29z@J^FKs0o4oFZ1sz~rW$?3M
zMIkI3sgsR{Monv+b5CFM|KO?UdCxQ3GsIKcJ2`2BvD^CFUTpqOR{6=Z^2}D994Z4A
zhw!`<^LxmpLim&c#)VRwb@Jo~`Az-@MQDtrbzo~1V%PEEsQ$33C6Rd<{B1_~@L`I+
zDP@i8TG{C))4XqaMkIX}m=`=091}R?``**cTPt|T{Lc1S%h1)^(i7&DCA%x>WwNLQ
zmUIQv6x881yeWywUBuc1xHgx(-;I7nxyK75+A}6YKPG|=VQv>P>aIM?(Jg7DV|9_y
zTkhC@>3#1j-aWy$^f_yz9d#<(=k?cMp7%G;;6N{PjJ?`CjG~ML^<!vQhMtzyQUITS
zVVzBA+HiQY7G%BvuZ=QUR>*A5#*^*!S?QXz2mg<C96b7rXnd_t!=gSgy&^f*T0_P%
zbBkRyP}=)?sG*$}els#YS}VC;a%S|Bz0*iZ`q4Afe@<`PmbG10k`p=j{V3TH#)kyr
zy$;pd&PvD8*()%vwyu$Rpf*XCkROi`gIuz3AZv9(V=54vx<qp@%qhzZm0WA&kny%X
z(aiUD4P@J;BU_zDWW+w_#mMdGuIL<RlEfvw@5u^WGV|=J<~#Be7+6%klDb~*fy_aw
z;W?Sl`b+V?A~Q$zgj9tY$Kb?BG^-i$uK-_NO=^w>CCQKy<Zv16@Q|4U^@1@;W;l<^
z5?`Xu3V-eVq0>>O@mj|`V!s+W6j|jwCg*(@J$b>7b_Khn4igWv6%mJj#Ea%#2ZbHf
z;pNoa`BWu8jM)PhPEehvQcbF9JN%vqIwj%XRcQ(z+EVMTh=ckDV0Hz)Y%I_dj%`f#
zZ?c+1Mwru$@><;NuZyI=7PpfkC8OP)cw?%!hVNY|?!0Y|q4t%c+T215_fP}F)TbFz
zS=Ul$lffVkuJ_`TCqKiO{}AhT`UJUB0c)#~A8i<06@ENm^c-S!6pn{vjUIIt8Vdqn
z*{#E^^r$h2O#Xv7?v#Lr>>iQ%k<nH(u-)@|ptF6;+6Deau&p%8UKOQZOa@HHt1l#~
zr%<Pp<Y_#}Ulu{x`sh38a|?3f4LDF921KYIQ4p!hd7FskOBHQPg`MVns563Bt;*qX
zI?{MW9tWRk)Q0zo?;}pV=qabYaoxMjSKHid_aP5c$g5ItBAe(s`jI4pX=9LZxfxI1
zUxght^djf`61Se5X@ecxIXfTa@aezgPa=G96)I2aXeL_np&ho`gg(%6&P?-1qpy(+
zo5H$CPfAHCGDk)V!lm@3q_B6940CeL;pkB!9ZM~gu8^*wGG4oN-|e>^u*!b=n#-{7
zDD$^P6aM8mQ)kHl86#z-ykx<R7&M~=`LkB4!`juHPqMb#DYC#AY;CtrN!X}kyd?d=
zViSzA%mxuFa!YIb?|P0H(_zGy#9}p9-7cjQac+m|CBgOWbUQh^7b`Bo$J5k~1hlCF
zs8@jhUrKA4BDF<f?@fuJZ%EX|^n!c2`)4^}9FXDm4fBApMxL??%oY+d&S1r#a#TN)
z@AT{Nv*EFNHtCw@jAVzKn+K#X6=Mf0WK*^BP=br#Y3Vln-Hl%tK)WvbeTuBGU_hR9
zq@NrsZ_E4ASen3}Us%69vARxf6rnl0WSnd=a?R`ZT=Op@RWq%(%yH7vI2vkUbT=9q
zbIlFm)=m>y<(uJs!(8Nik1Do8qfGjOrf6LiD%UaWSx;7`65#@74x-QHu&*+xw}h#0
zf!WV8Ufz&r`K*P(YhiT?T31Bx`<s*?n#YYga?+e;XPa%w^myx2%QP#=72}XGPJ25S
z!!h=6!7ZLVe@o|k`!KyxCc024=jAYnM~Q3(8JR#9+=Ugzv7-)r&OrkzVaa>&A;R}8
zxIQlv=;zDPh15nHqI9VNs_bvfv4D!d)Ob<y%;EN%=6@tfM_D)Qb@oU*%V`*njZAiy
z%0k}<-q~6&{GNFYCe(pbHQ>NFcoj#s)d2gw5(iWF&<S)#C!4Yk-cTKCQ?<L1o6X4s
z1LOwj4$dWE-Q)bd&e}O#v*G{yvc_m2>&$`nee)aQ<uOlLSM0IQr_NOCcjK0?hPRw?
z!Wm+3Kr;+1c!GTYf_|b6-skEI_)~>SRYbNQgsDPLUxvtJ(~thk9;Q62RVF^8ShE*(
z<9X^=CRaUJUx%?-dIdBtQ0K-N{~9lu-`ZtuuWkw@gyKV`L)C&8lKj4^NdqL$`P&{s
zuUH6jSy(ZJ4r(m(9V4zQbO3)>vwJEcSKZzsohvoKyBs-lUef7i;^}&9R^34kSLXiR
zVxZoa7@Y(*F%Pey&9c$B1UoC(U)j~ou+cDh&_BVyJn)@i*moSuPSX`cyFL|4$mx0b
zIUH^W>1kZ8%3zi>^57JGmY20_gMeGFJ8^H6T)9jB92AoXZ33xy?#(0`&9Ul)>=PS?
z?V|!+LPZJ;f%S#v7AwO(Yb~}4taWw?r<(n?`98Jy1lMfliz6S#(g_WwR_vmB79#`Q
z9bJ2T-%8%q1cy{CaWk+YTnO{6NE{Br_@FL9!7OyOGzcW3+I9Kc8f4}Wi@&k(C(x*Z
zV&vfQYIRJnnTt*hlo#1;x;wov9CQ({1gD$9#-phHRe1)kIn?8)xE=+8a$r!GRd1u-
zt%yY<`sWzz+6saRjP!EPtMpO>K=m>?odv6oQHgKT1t$@qMR?U)wi~C6t46+Y+USGs
zt(QlrxTjI!-z9@uRh%_a@S`S2#mIxP90jn!_prwdqmEvBmK)rYi`_Y(=3#sfq7vc$
zsQy7NxI04wn?qP1&pb9`j=}hJYUO>h<-QCen?Ivdy9aZ!iQA{_nGTWT%f;<Ka^S#|
z;M|9Ko5J>=$kH@s3}TB1?XE&}ZSXk>8co2XfUf)id$V5p2+?XlMs=b#=h97gBeFM8
z?hB}X1dI>L4x)9D94QH6>(SanT)j<{wh_M{s5lek030vREZxYyh9J<HPOY(iK(5xq
zms?aRKUiLcBVl;z_TRmT_a4wI$?pyzGJ-6~#ixOc^KrI1=NqHB&*9kv;t@mUT&F`W
zK<{=@Wq)V)Jr8gGf{pi4{9UMAJruS&EP6`cM4`G-k+SHEYOtPy!VRpb4IXaSpUPU@
z;OPojTAC*S${r+Ee*p_R0=#Yua>Y4UgPbZ!3~G}hx5=MGs^5Nk(JAys(`6LB(k}TF
zuB6j(+=8`-xhs~jJz2FbxR(L<B%<{s+MNk6KEWmvze><elwwS2n17erm`kO<2jAT8
zCWfqZ_o`K4Kr7ZSgO1!LJCoQyB`|IYJ>Y28UrRMGbRoX)q}n_RgI)#gjpFW{+cI(x
zS<w!^4X|y9Umft*?QL?XQRQG-9+{fJ`Ynh{A}Vl|Xf^|t$|&n`NyfHJR`yZX1RFi<
zQf^cEzLu+C{(*c>ceY=;%W31BQCm8&%WKOHd_6f>4e#2*&vzL+oLQRTQFA(-5H^$n
zcehg7tddN0hQRhB5n}R-<UB+yp3xR|#ZhOIiKmiV?lT4ViH6CzIPAPn{I8-Sxq9Dt
z-MC<k)&y&VRm)DbH(1Ne9$@kfOgjU^E28d=$VK<rPbaE)H&|5<&)e`!qA7?)SicI^
zwID+I#LnFX-6S%JL_bW_{aBEWHANs@5?{`u21WG9eiV2=+Da8QzBcY_qSG<F(y8Gb
zvHLmK96Oxuw6>mNClw;v5mc`%oniuW^@N{3vZFcu*mVieF%?KN5cBbE!qq+0qGaqk
zC?%LXllz*%qI7KTLw<>FfdR3cF}Zq^t}0XimUE$+5@**B&$H)9Fxb~$-+w!B(%5S@
zaPB*U?J2sH9mHRJ6Cbi+D7g_se6v~YS>`MbtCE@jCn*j#gW={j;&DvMgSFc?_QGec
z?&E4ZQ0dC*VRUCV?61hLeKKA@l{DjB>E?XnbQdS-L(i9<j^3S~*F0ssE0bc3>(&@2
z-zuh`gKjP!gz5e}lhq~gI1#<~qBv#1#?R**8lza%40JriWG%m{fuBVdxcU_ZiLxki
zgySr1uY}4JN4q=8@z7qG;!L$y2CsRZ_D&1fSg6LlK>egwyp4SEq5IYpXOcM{PBdZl
z33NzLz|rat8PQXR!VfQ*a+KaG9$V5`+uf11Ct|IMQ6^T8<?LPb;7NF6aIdSkO;E+!
z#BL0Xx)B;LC7hX7dSJe1Nzw=z<(y%sH`U3p>dD-|d2fB+3M0m8Z=Yr_P!%6L>kM*a
zI!Zl`*%H{frlJ{5IG4>z26}T|f<z>Rvp%%A6u7xcR)Z@cY%fg|dw}3kI^uk+7_38#
z6xm}>w5Is)d1eMY)-U1Jk)lXSv{v-fNX_s{H3RLu75%5pTxWx>l)fP9)7Q~T_X$^1
zxZa7_KT3YOPiov}t`)((7+RS{%nMLgn+$nG8=xi`s8ku^a$H<(ECF_ZaP<>iV|;F0
zwzgRp{q?=|f<Id6kx=-Ib;~SnO|n}=4n;q9h8bV@zVsfI=bdflA$s=a`2K)iWEg)B
zgG;H0&v~lClX7s-<$ZasmmunAu=pmdEsq}51P>EFb|X4&3=FI`u+`;iNxG*M_LQJ_
zUk^m=W#J5`v3UjMjza~0w)#hIgg0qR|5Z<UqmNU@x**MA!Z6gg87P#e189n8)yR#v
zVRts`=Rf332FC#u`VZD}pVVF;M@({hFj`udr_<M&-%k`v(B&OB?isJx%VniE)gNyy
z2v4PN{6hZJ6MBukWRhNY(j!OAX96ocqe4raJ?2Vz9;`cH{S=*qN6C1W&e3`44$G>-
z)iK1NG5*~|cW$8?`9#Cjr+A|AIQM)=7k-6q%6&TeE9#gpDMkzPWxGef@O`2$hxgg_
zb&42fz??1LQzh<5x>GhX&FT`^;d#d>6&?;kO?3_yJO}f8<NsB<{aEmI`^y}ZAdUX$
z06CdT_f{I~+`iUN#O}gHo4RnG{b-TIu-jZn?{G-gQ<=LP_pRnuyQFaNBPSZ3rgbIR
zJfzEcs^82El1H<hN|BOQD}T0kteF;`MdV*%?b>`i;NF(FpOz}%+W@??h|F2)QwK2K
z%f7Dw4Jbv{3D_r7BbuQD$Ed!?(dPtpJ2#h+>FlTCojID7R8u|*@3mGKpGl!!<GwoP
zRGlWamB7BctFH8lztEm)A0ZMIU}6VytrhW2p=L#ih}%gwg=zcP>o??Cd0DQF0;S8;
z!__E_fYnCH0Gp*!4b2Sm?7&M_c!*X4wRxJeMt%Mxx+2`(>|rD`!+fn_{#RpVxE^$#
zjjplZ4;1s(lgzNk{7S}PL2+394vM)_j=;HERI9$=kb@qSC(m9+v07l+IZ0rp*~I?-
z!)kY&s1-x43gJi`R(P;*5q)nP`jiRIPBS;KQr`{Nvd$V)q^|Bp>Eoo5dK_Q0U$~6X
z*;^y<ffKRcgGV0{@d`xXFzT|C{>Fnho8f+Mbf+_@+>^#I)1g;t3+^|u@e~Y7LKALC
z1NJsvw4+c``R!IxKPx@TemKLaX*UawGqyWLb}x;Sg~l{vk#SOII_Dyvh6@d!uR_uz
z)=~Q|^NqpR)x>iH)%G&g>kn%4eGu^yhwfafOI#YG{}-eSn0x^{{v`|j=wlLV-y#nG
zN-QY2)!OrrFaOX#kCa)~N6sqaiQs9oOt_76&^)V6%=6aVaMNh}@IfQlH^+BLyE#v3
z3t7erjnKGtaC|>LzQ=fXzfu6>US{Q|;lxlnm^Qpa>Iq}=#NCzbmP`;yq`wJLn?opS
zA$G@t%RPK~K-V0{Q?U=6HrDiDn@|hO4)+Kz508yhiXM*Kw8w;od%y8D)&9=U+5kj5
zqHFu<(^iR+wb!WQSHyijnga?!X3k>e94zTaBwj!_^09L<XDbjJx8GeM|An7rQTmJY
z&2x$1BH2vs&X9-<v!*+%v`uh+FswgWHJr1~DtnOL^rv}eCr#AO_9UHW%toW#w@Ulj
zIsTVUdl|de4WPV}nJS@(UC9jxKE$*B1oEa8N_Gp@xcjn3#3fcYF~WV87mrSC#rDy1
zj2*)z;<Z77M5(m>xbv793_b9751b9G38eWgU)a|pxY2yXj_NhzG`*Uu$nG=nQ^fLV
zxV@J5WAi|x6ewSo)?nO|I8*|wZs^58vhzE<yoBf5(ZfI4)1HyN92d$Kc9pwiGNWH3
zR~K-0p_BvDh1Otai506&LgxZ40-1p>p*>n?T{8R0Y~xpXQfCs$G}zFFC|tz$*67O=
zD)~4NxrW*_=M6_oT>v8<<2~O*Dswv7_b49wskPUL%0_z9ZCG52N_Lhwx;AW<uVe>j
zzmbonnY@ZNjkbo^mF;8JC~Kg#+WLX#Uz3cj##geR^(&G`X>?1Iz`PM}gW8ed?%PoJ
zo#z~MFUoGS94NGg4OOY;`O=QwFP(9-P~@5H>KDU@7eQnx*}9H>#0iw;W8%|_%((_z
z_VOMrNlKG#H`zxIVu!xo=xBUnbdrhe`cE-?Q+QN|sISz|v0(sK#8Nv;)35&Dj^Q5c
z-ww+yY-y#{VQ4e*u^OY!vQKKvcR7e8^Z6S_{0;`YiBFOAN5c;hr^R%D3wZ-IN>&<Y
zjcvw6I*LSgIOmxE7b0h{RyOm!h6PJ>7gf3s^}7LBRG>B`(3uRSS`5O@epILJ>=L4=
zcq+3WVVCMgVF=#m(?O)c>J#XvtB*0dp8V<qb6e4Q#8dr_f#+uM_)=ct`P5(XCh^+D
zv$PphzpGLiD-wxrOI@e0P&2!5)E(Bn#|~?vegp$vB|pod<L^)>%d&bZI{7b*U(IT(
zn0pAd_AP9+sGYswR2h_NH!9JT)k9?NUiusNee)gk?j-Ep&Y67vzr_A`EJ)0Uvkn$?
z#qW1OVHEvo2o8QucfUdRfk_`7jf#&YLY3)`MpHjxu+fi|d2~@1z@;754kcrTQZd|p
zS#xZtz+JIiX~!MabUzHa1{U{-PzZGH60PE(Jqs){(Vr;u_v6!tS%2g$<4Ee-NIk$C
z%u{+#3-mDGt@<->LT6#wyD+f>Xr$tM9k^Qn_xoYjvuODU&ODHY*w+{a=TqHNQIv+D
z;qHFIpdF$5+I-iN!AaDV6rGMA?weFsiJr#K(#+Er%V+2%jhdS^!Cb}qLv=ls?oA?9
z+*}7vfAXYbDcRx2nh>J{>=3hg7Fvvm#lU5^lkCF%?%$o#jSRjaajcV%_wLiOIL;Tq
z-4NPr)0fnRbqir;OLD#%`q!TtSp_|MMKjDTU~+*hNTbIeuZd<=^S&181N}$80g3+w
D(IUrW

diff --git a/pcore/trap_templates/trap_prime.npy b/pcore/trap_templates/trap_prime.npy
deleted file mode 100644
index 146cdae25b50d81f6f05a1f7818aedcc2210228a..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 27506
zcmbSzg<qA~_qE;K-HM4~Hx^<GCL$8|-si+V=ecwUA|+jlo!E)p?bzMjI<{kuHRf-9
z-#_7<`Jjk^oagLVd#$za^kJiix{b@9FDze_gUtxG203)C=g>X0twYOt4$MC&B*@S1
z;~!)*{`c|zemdB|$Ag7mfPp_Zb#m&|vL1eE*0Nr7z5lQOO5~YG&4Ts;dZfLNK5ACd
zdD=%G&?nSu9dDjiPjwHyRX<Zd)j{1&OSM=xPzCfu<*Ar%tZHay{af8oPI{f5pjPW#
z^-H;{OG>DQ%1a)SH>r+pua2oa`Cc`l-Kwber6IIZ9+qjsQy!t-`2DfyEMvrCQCpl6
zedSE<CR@tNbXnY>PE?8fg|~E-9|e<D<up-{ddNO9S6&s3Wt?awP4SPIDdOZZ(N!qg
zLSA%Ew5Q*~RsO>-iY8Qo8q+!|DNl(tVy+ym8kq~t{N^6}T=h;ZGxz8n`kT6<Ug$--
zsD7`?>0x@8x}bv1jrym4K@Zh7wN%HeH@d6YU02oB^?KC>C+?|Q=;f-9YNa%V(F|RX
zKFQbQNh@`%N>$@kJ{mzmsv3=^IkZ6aqU&<6m?z$gS8}o(Bc6!0(i7*bPHknH97hMF
zRc;ak<bJtbUXcIEVj^5fVH4%$H-1~(;w8mMJc%VDi!YEnWe?d+c9vVkFj1czDM-E(
z^W+TiN^BQr<ak+7R>t>?6nk_Zm0|wYf6O_$jOnR6sKc~C57FDTqaI*((Eq7u-OQ}2
z$LghO8eOI9DqF9m`g)q`sejRCwONnT*(y#yRsX5ZdW2q#v(%+a>L?wj0jjiGL}QhU
z-bQ(<u_{EJWdwzadsI(0k!Qr;d=z!$gJe5dUDT6Tse%j?gT)<jQbf=b>M9pYJD$fr
zSwkj>qj(-c;v4r6hxiC;M>cvXNG8d8e6h$CS@MYtlszd@1k!ggSIm&h=#Ov^-eRUK
zA{SAvSR!iB2sPO3Wq)9nFq3tTo{s38tDe&rT}}Oicq^h?>5HnnTBkRtt6J*wY8mc)
znp&m`m=8=xz1Uo=nOdu6s`aY0o}qrr5&D6=B~MZg4W!~`YZa?kQU!HU)*uJUqad0t
zm&pF~LaveTd2=~LJ`jm=x4I-Q$(cCS16f;ckO!!&%$D`!5Rppj<VJi}9PX@zTrL`k
zVu;?4e48wds9r{|#eBI_^pFe0Y7rsVOC<~9IZhIz#0{}Wypa}BT@;{yaLeoHiLPN+
zc31O~YOWWk<@&oSNyXI+^%u>@&!4SRRb4e$ZP#zqEyYy>9f-IctJkU}G+ph~?acw^
zE!|R&Hkaxh$|kE)0-jY-wVn3MlkyzCQw|N1In+~~lEvi;`9&6!gXk}rD3>Dx2gy8<
zChFmYTuzWK`06<Penfu8v+6JV&>6W_4v;RQn3%^eh!8mf=PV@t7M;a@spUNJTDF!s
z;+Dvk8$|~H$cxZJ{z1%=-9-(tRZJ3-WPABS4knUra+k8`X7+2^-Mpfb^aIsRRngI^
z9{EwEdap9|4mzPc^+MInoTuN=D;2D-sRQ~e?zE3~)7MoWbAfW!msAT>q2g*VqQ4g1
zp$6(R<x;LpN3=Ip8|Z=fO3l<+Y9kxUr7}Q7ioK$ucqp34T<$9?^B{Rsx{AN$IN4a*
z<Ue96rK1L|ly~Wn_#so|Vc{ru@w0rVSR$Lz9>nb;c}NteDtK~_g_CfSYxx-Qp3mSF
z#RRE&A$eMil}X}}cqhN2=G_!4<!@cy|B~6vJfgm9S6$qkrf;hq)LwN!UJ3n6l{6Qt
z4!Wu7tB#nJ%;sjE>7c`rrDYJu$Mpr>#Prwob&Q#*;#44Qrth-8I!~8qm_AO<YPeb^
z3(!nim+s>%y`(dx$og_5RTr5u9{1TpoE90l)t#uQ-^Fz*Dw%qSCv=PY(lOMfGqSf_
zz=QcX6)WnB-@J-gDt_?FVzj6v{^7x*v@9&M#bVhTm3WKjBC|v_F;L7Be|Qy|MT=#S
zJcbylsA>yC>@kDQx@K{+g;_yW(tA-?&QL+!3H{)aTA)s-lFC&5^%7N04_9y1OscGY
zYpXh}7V6Tfi~5A8v0i2A+Ug}WRDaVn>Y(nERVli!CaNdM0Xw-+2-QY4bEebuQXUhn
zagt&bD3|fV^p_YYO<5h!ptrEmIhr72WI36Q=T#Ebuciz~eKDj1m6xSxfUJX_5>5@I
zx9~!KW=KEohI?!zgT**`N%Ti=@jxzTQ4(De1;u``MKlpL#d$G66u`eQ)O6Eb(Dgp4
zuI4jD<t0>_DLC_Y%CE0bk{XLn^Ig}`?aXhwgqf;+&1^MSpVF!NFS?*A>o(?QeNLa$
z0qUD7i{AP-a&V_wP5;urh>e}}S}viTGF};~J$0jth`ROMTg;#_a)*dT6i=6@cx~B1
z6p>A6lC+bcPhy#@LR;k<86op%np`T{kvk%z4kEImoJ@ft0_S0JGY=K{#UVs)vV0+a
zi8wJ{=0lZs7JYHwLKMXlS<IWtet3orX|;&f9}#zFR8O@HmBw3dQXZ<T?ty!`uA1S!
z5LHMg=(6@+>Y$mTcj`b>nlsF8I=|_Go?BRF>u}Xf569C=)Bc+4tyD-IlEcv@YU{If
zn_ki<)ra;{Rk|sI)nj>IUgci$sc=wjWCoCfx2!88L@ALb-^o;*?GDA!cAAZ7Z%N(h
z9ilo!Zsob$E`8-bahHGPU*v1iPqY?8M3T&h-da#B7j}^<8pzx7JpX}w_{1;qUGf~?
zESt&s(jPtdoVYEwDihV`IPFob%oKG`7t^(L6|<V2t)tXxbC(L$cU7FZLwV_f_*j}M
zV}D}?soClbKK4Y%s6UjZlT<V!dn)cfTHU1><)S_!M|-K$>KIKIC4f#=p!=^Rsmjo6
z8l{TsuBe7Pq@;@S7$-S}`jAGBb4nqTR?<|NK*w;>3+Ph?<ujlNEf0#X=o)>{aSMt5
z^hF#IS5ak>gs<3um>n+N`FQCdr;Cq3Td7n{4if9RkNC{JP=n)<%a=t#)mtw>^!-4;
z_<>q-Q6EPn3e^=ESX%#0T=&+GsF!J1{nQZL;RF4FvUO{HRgG4TKtF4M1q&j&$0(_q
ztNj#AyXYi!R3FqHV)UJ|<!o}J|70S?(<}7Sw)9xskk=?hj1Z<w7tYc`Kjkg)RfbS4
zdLgo@zcgquz7L6?qMCXqUm#0IkVS?G2l+2=gUX!B-vMcC5G%xL8ICg#K%I2~hPn*2
zbP;%@8xYYb`5SLdR9(4JRFXwe@r&zndODD99o<OpCU2attPWBY%zw3~c|cDxW9;2b
zi@t+Cny6Ol3HpF~s?O=+YKp!@m&vY$+C&R*uMPBA`IdZ@3%Q}gIODUrs7(1B7-I%q
zmG@O7-9Rmj76);ccjZ~JP)tJ?`2$RvB%@GouF_IGBY&!)dQd;~|1tCy^=Kxo!q@D^
z308?}^0Ig?OUMco3e@ogc;YLsihA5$cp_E>e}<><9GN^#4gh8<D7(l4axOB~Ri2WZ
z?-LzRhvw^Lx{{uP?%7J6)&qd?$LOkND_z8l(?iXw_9FJ0=1rBN>Y6q6F!Q;ptQ>H+
zhtX@o5veuMNt$a{DuB-W9o^xvR6t6V)pq%S+Tm{VXqz;|Ir$VF<`2co6Jm#~2|N%C
zl=BPcUQ91&Fx8<i>IwB%XMrg)<qWwLC;v+Z;VwtZI<gax)JN$g_OeEZ)C0)TOJb0C
z##@SW;w!%+Y6&LR<8#tbpKFTucryNSo-_~#)#O?EN5r5quAn5nO1rAgrjuFNY+yQ?
z6Vy7DsV1q$=18-iIYJk*Po%ZFlZw)#?0a-wV8#`Cv~HwEsd1{lx`>-l=rAtykZMqr
z+JGDCjF=rK|B=Tj7&SkL9-tomkiC>MCD3+x0-Z099`Ic0i)ei%cLS?C&=guO>rgLA
z(gV+-DQy&&$rC-cxC{k$ZHpYu1Ckny+kC~x$=ad;e~zmA9;i8w`^zQbfv76o@p<3G
z93Z1~<hFw>B%6sRxXlb%9{9no4^f<sHnsXscQCu?<vLy8p?KX~KgBHO3;aGy_cFh$
zrRuzP*MFHM?X9$r`Urd<WsXqK<Ri+bqN$>4M@iC$4$^ITO%|rlm}gGNSMs`ehS+YT
zipdi+gI43I&m$9A<s@6kXIx5)%n$5zTK<-wQHPpPADp&0YV1*wPo1M=`CSeq7es8j
z{6g0;dmR*AaFQ=PhKGoSJb|C$J>^|g-!{A>=9|ItIiD>`(Li+N?dUn{<tF(rkl0|^
z6aBWhPBzEVR3O1fGgH?^&%CSas(h-S-5K{c4VZqiPBSmz33%$uss&<q1uArgu8L|@
zQ*A*l&7{lH6TP#Qs*Lv&fmR2~%hZP2$?N#J^Qi(<Og)zk<Ux9mI6T0IicP@ZyTuz`
zTJ!`q&QC{$5B)3sfb3I&)T<-jyv2U%BAU@I{LJe_H4@T8+~)Ie&I<fLew**Y$sY1Z
zc@Q^iN00JHC;GtqqSHj;Wc_4WOb9DQyts}kmLn$1ll(0ok6Cn#+JRo!(A0Xdo~=sh
zN0`E*)NWMTPwK3GWcD)+vkmeRVXDIQICBT8ZGAIFmsE)=grX=5UH$`2r4`g!S%7}s
z$(8B@^9IXMiLMPC(FM~gkU9Iq4zPPhXV!_IH-__O{BK!G<U>wfL6z!>yp0rbbOGI`
z8RG2+rk`w7&2lsd&%uW8FqCr9DgP7Y#bI7m-r>9XQhr?Q;i1Um71B|5#<{Gr1LlSU
z(m@Q7pTrU7jQm)IZhBKz*8B8hbyV-c3HE8F=4&r?NJ-44yQqzdH=C-H<}rOpTg`C&
z8HjK+u){i4L5Hc*=1j~3Hkw3xWu~g2UW>w1OZB6VIG+a)sFi+EA?+jUs!^DqH?if$
zFk5va$EK_=tfg%;ti=t@&auxdTXYw}vbm~{Sl=lQi<3Y|E;39;(JJv7m28*vpqjE5
zBKMehjr#hPKj7|sk?1Wh^5eV^AB|3164Q5Uc};Xhju%H<)TAxq1HQJP^u+{phd0E`
z=cJCRhGu;;96x`8K46Ba7QiUobp`DWG=Rwle^=N1tmm5d&9eHH+OBfdSUQJFyhddK
z<&IW2z>}0j-u;x-b#vN^DjF*jWVD<Kwxy=_6Q5ZI3%1U%Z8DPC0@jW<<=>5RwzifD
zRu5}YmSxQ2FZd^6N2LizxABlE{2q{bCq!gxOxM>@V{VD#G8{c)jJSXbbB5R9h4~R=
zWCY(W%85myj(Cd6C`aU{%~Ilo%klLa<rVQ4Jp*p^mp0V2bNo0eQI@^By%(nFpSq9=
zN2TneR;mc~8y&3>{Q(ygs_vP$%+BT-JxckRPN>5hfu(8y2~Lrps4RNw9T})LV-^io
zt%Mh*>6KIf3{-nmkvwavwVG`se=jQd9}2n?Tr6~BNODlMpjT#PN;TG4=2&uVvsiWc
zTs8+bY>ujQR~{8EA_Y-+p5GH6z$|IZ(>M7qF&n39CztXTG9S0{A$)<+A2WF|bj@?f
z#zL~O*vr3*x<J1M*s7YC=_VuMkBQ^R<T2uv@-ZKqEzMH6xeN4-(!n@1P~Pe*StwN<
zqEo;W$G|ncG2du!^MU@P2dI6>>b*2WZC3xthcrx15#un2G(=@`k>h}TPO6?}TXViV
zW1P3ztSwlSt`Iz8Vn$5GgewW%<D12{jOjG#NqG72@j-cdpV8Pl&hpq&O;p3p#)^Wn
zKc<}^%--vyJGGYE5G@yRM>FwnEa$_(Of;02xg&bcF7CoC;yd5LuW>^JN*8{J7e$_Q
z5Vet+&t*L-O5Uis9J5Yeaa(K!LU%EJbOCS*j%I=?qFbmXx-Lyo6Uk8(Rw*)>8UscB
zt$pm7<|K0xCYwcS4m#;XHD8U?x8*m~qAYrdYzm_%n2{IaGmhwzber|CGGm%J6{w=D
z@vo8(B=?P9G_gZ)j(wH+kH1UEt+4Ik3&KVO+?V?-YyGk;`Hf|)weS)PF>4<ejlfAn
zViF4zyW}Kk6$f}_`G)_4Oe}>7v^U=Zgj-X(@)n}Lw2NYN7qdDSEii|#$3&WjN_HCf
zA(vkPGOjF3VUB$PJf8}VXsO-}7O98MP&R!K_2n*V=v8`;Dl-VUJD+Z4mH-p@*6w0H
z*6;K!VEXq|RsJh`sBSch_Ru$3or(a7G*KJuj^=(@gMGFPXK8A7SY%wy)URoq;|>KL
z^4UDBwW~*;!~JRwZ0I)8vj?jga3~}`__Eq*DLujNv(z?<o#h2F!;S@u_D)WqZkWT{
ziD)T+>;G4!kBQGfe2K!wpNMs03TC$Jz?AiXq&p&$R|ywBSbi3-g@;JNuNjzXs)B*a
z5?76Zq5yc6UuL?lqgn&YcU9H3lRiPKfc4$T2|P{}+*WN&botGa`kL9?zRLc$y{c}b
z(h%7dR2N!{ZV)Xe%FA*O4Wn2vzIV(Vm1)$my|gv<Ulln#c}GTInoETLgp@v;+WNOh
z>h95JQonNTk93;QBf+K9s5RCB0Rsd4d5(9}@wFx_@EyjyM0@Ft`7H;m${9Wzb6ZV5
zQw&GnDIlXn3J*Z#UBCyjfjpl)&bNycDkS^KGQ0qJVv=nx9qET4F@bi-B)$-sFpH<~
zBVwaS!7L3<5d2WC>R?_0kNE<1wU<h#E7V+l1s)rMd9W+(R3)^buWPF=?!O(lF$%oc
zJ^3DWcei{C{_206&QB_AI@@3B&AggzB&%tk8d)f1eRi$1BL4M;^lE#%anoj>oRYdv
z>i5XC%YZ`ttj^oI4eK%3b;j5nzm~>0TX$cFappLS&u_~Heo*YBj#M3$w=WF`mS_&f
z*8`L1U*N#!fzO^Uo}$+b0WYo)b-%b1dQk=WN<@oqU`A^pyDdPy)8t$^3aocNO!`BG
zgINW$%RTLeY@LLvRNE|L4pL#5vwUbWod?ziPL=aSRTZErBhG*74falExN4)fs_NiO
zHUPzKMbt7x)jo2vzcVk&Orx;z%8ZM$Bu~t)o%%x6>g&^dT@&8cyZ3#!*<MTeW4qyB
zO)vCq;;#Bl=)SIJ;K2Rf_k!9-v>_MIZDV(M{kH7jb%B_I<Y-zhVr4r~0Ej+<&qdUZ
zK!1EEe(|*;pKy~8`BCmCdIL3V6xl!z^Taivu~i}s4Ead<z&?u|VmA+z1HmC*1D}0I
zAH-aJ+*}Dx;V$Ztla8f5(n@)<m2?Ad;7qMmesx~0*L`(q^jBY<A1qpqVrsM8M}8_@
zc@h)V>8E<2`>H}b-PoiCMAVsbA?r$Nt}fMgQPZ-`?swTdBy#)<nHqjF@oHwT>GiT+
zBo+w2Ypp$WWsfD@*10#1C^NHe`eb{oXHSng-V>3nHAP$Wl!^Q=-b6OzBf-aC1!sK_
zQ*}q&-C%IPPRPlEvYSXp#SaC}oi1-+j@gJQX&Nf^8L(vMfc=MJy3z79TaG!buYRW*
z+6UXuV!|G4HiGsNkBI#b)6Nd@L!85G?4<guOIn-9QH7VOj)?J>%7NNaU9}h;cMXjO
zGw@E9Gz;qPGC%vsor0#uZ%)6LIXd+Fz?04XG%MTds{1V8^q@0w9i}f|czRXDs`1Ms
z7halPXv$CNF}zf-f&Kfc8#7`S=Ea{ewvCzSdEGC7HNyn`gKyyvfRax`sW=T3&;r$H
zh)719Eyqt^OlHZ$e4=cGPU<SI%6(vzu3*ZW0Mt+hY*PjC1WJV$TZoF>SQOJAFte31
z!^|f3yI{vE=r|PtWc?jXMgVUMlu}jBqk8JHdI?VVBRK49dIeC@S$&zVQxjDV9cO|n
zM)ie@ayDygDQdAxb{*9}<#qP!I5qBE>uF6d^r$v^r1f#Aclw+84ofC1dA#Jo;-<MX
z7j;_5X7-4+@D^^zTw1uh`iCSuj7hg&_c=dq>xA!q$5{&U@E=}7M2QlJ;Tu4T2hnHx
zqXN4?iFw6)0UJF4gESf#ur6Y_53<w+(~pM;m+`1gJB6D_q^6jKO3+1~qerW^(1{*`
zx!zzF1vh_6u124}B(e}8SNS@z3s@i&Okqi#&m69U^}p&4O~UQ(p^<7Yd8rt!=r8(2
zKA7I)=^Brd?t!D?)@3x#u$gJjEgYtHXyR7PYKD%;_E@rVshHn;YU5PD#C=JFG8@e3
zG9x4F=46A0j(p?dJ#dU?adHprY8JI_o$#-Bp6x3yBDU~&kzY=go|sv-VXk>8JSY_y
zmw<zd%dsK=(HSDscqH)CKzS30>ZPnjQ}KBf<sPv>Iz#iRfIpq*^~^HJ%lE(!{lKW0
zVDWdO$~Xhjg^5Lclz56sc)mPDuT=&Z6o2hv)={b89(t%Usz1#}Rk}il5y5RJ&U|Vv
zl%YIH^$)+BcqBbCHf;RQHcMM{c6mH5z*K3WD@LzwJ}Wt{YS_-8Si4hD`b2yDwX|*7
z_cGqbc*#t+JH1-;I^q6BW`$M?_-e`YD(1JJy)+K;Rop>(0rQjtPf-OZc0W+SYjA@n
zc^UbHmk`5v9;USt!jCI{8U3y(S;P!*HzRofdTL{QU43y;l;IQb^T*n&f!Y3}7C~Ei
zsM@K{<bbD;0_NfaPr|c&DqN_YZmOD^2X%r;N~tK~;8+e~_9+a^?+DC0TzxWInft{)
z?xHVGJeRyIV?@N`p;@hpv>!I?h40IVG7Dnz8Z5dP+t+_LK><)d)H?H9P|T#=NnNHr
z$S@|Shz7&U_AK4&*q9s@AGC;uc`xz~x5l$8d@kQ8#(^DsjEWi}MxrL?M;uq>1HkaE
z;`RAu(GvRGAHIgq!(>>Po<NU12^_smG!+Cj=L?w8n_$5wKxNvimMdoZfC>9oIRg>w
zkcw`gYV`o-OqE+v;Yv|3xarI41ihu_h}l=PNX`P2J=R{xG}LO90Cmn4ll&Mtoc}aS
zOlqC{G-Ybw#(sla1a(>J{@RblO<3l+W>Qv4&@1jN!!ZdLH}_Blof$w;?NVQ4UrN6`
zsgki}z?-g{dNmz&So~wQHl})S^Nlt*_s6U=949>hCUZ09npWV%pNK)|FSYTu8j90-
zWE$=S6=e-(qVeLs+{E9C6LJjyBTn%G{5LvLQT|=olq-}LJD9Rs;LfgyNN_z%z)nxb
zy&ED%_{e_H8{G79wFZ-VL9i_i!6GzL4r-KnRpr;UpsPjdUwWJQ)?6#%<jDXr>2Pw#
z<WBa$zJ*%8?RLX$o8OA~O}TZJ2BviIFU=}J2VuG*u;)8W`zb-z$?emkrWMF27d72?
zV1M1&(r1y^VgEz^3ATN{={C*R^D;o_SD=%Q1PgkMU;CeiX0#C-!P6fX?L~iaga6=#
zfFRG|?JsaJP!{D65e&tm3;3+rnA>}>Q0@){_)9IpjM`RD)c(*XER+nTWjt_perROJ
z$b~|o`4-hTRj#(^er9uZ04&)r)smXx8NDV)s2W|sgGTBs^|!1bX4oAkZA+e!biw|t
zpK!d=<@1mpzI7)Zo&9j$oYdaI1LQ2;7SZ)oZAX3UuPtVH$l>_mQzK@5%f1(V&3lHk
zz4Mi!St=)NFqyvJ{PwUn##xpJ?zacjs8nFU8$h#O{G}+y|7Cml6gHFl@}7J@zXWH%
zB>7Nu0w#{(OOTt#Fu&H9N03qbc?PSZuIMbYJ)ZAu=uQvSbk!cbW>4A;R6Ad8p-Zw8
zJyoq$Z}SC|)?sEXsN4w@fwxs)^rO&Sv!Mrf1eaI_$hb84utfj=qOK)PPwpFZYH*1*
zBRV#6{mbJ{(602XS+6o(qfglzb3bFgNT(&V(mbnc+n4%(3*DLGIOoNp<uitc7a6{)
zQ|DgyeHMgO3|w!i>lek2h~`jk;(!ls^F&1BR&>gLco)oJB}6kZiudLB_(Fb%2l4BC
zpvdJWCfVuY9G}dC@Oc~@e`B$Z`|+ph1~r79<f8in9dW1)b158{_YHFLC;G{AFx4$}
zZ}mb=(wEJRdI+Ljsv4L^SHanE46cXnw2y+IKh6USlZ^fpp$~?;PRU7eoES3ZNrxV7
z#yUS5ewqE6+$uY9`j3>o5kcl)aAjlZ9<5OQz$p*VVS$$>>zt>{VwUzz`{TQ*XNS(C
z-2DQEhU_v@d^gz&iF&*<IN~tyeM9+Gev2Pur*OuNP)U=ZE<QoFK43@r9`3+B#ZK@p
zRYVl;BmNV9Kw0zoF)(Jg`5N|1dFsEQRJiC7>M`)$a+#tEs*co66;s2|d(xo^Ta=e7
zVD|>!y%$VG7u>lg^)=s92XhJ7|9+}GJRPer{q+Zi{KNMv_mFe3E0bO09@}pAN^kwD
z!!4JS<2w33PU<uLTh6=07U52&9kcTtv6ljMKKl{9&z={uHGT2o^DDp1NeK*hoz!kg
zfAGU0o%tajPupr<ShyoM4+7EN7hJ3brYQmZ_ZIBnf4n?z%pV&afgXb7U|^w_yd$bp
z2{DG(gi42L8Tb4h%2geH6>8#d^;P}Ct=jAbP%Wasz)mCr-)`tWdLeYD2D+<en0Nw#
z8xrvuftcya=~d=Om7(&$ul)x5H&f+<2jn31%vI7Uz&-Nal*NgDA-~<Xv|rNZbvGLP
z)>niVN~<=5XJ^D;4R^Ay<WcMxKS_slBY(rcdJvBvlRIK#<0Tto`nqMcEzo-kyB>U+
z&$lF4TZ(tQ6OTY|e2<>;4!X->s7rg1o#8-oKk(-g@+mvaYl&;P$zxD#oRFd4klS;S
zpYM1-F$Fqntmpz3Y(Csa1?}g+D<Ddt*PjOedxri7PrV5-yHQ8zXW+>4LAi;i4QdMd
zVW|GCqfxCJ>QeZ-rn)u#mhWk$8YsUqCy}jAghs|5Og<Y^hYjoZwDokSu{}q)t+gJE
z*qyR3YgWd?gb@)X{ac96e5lOQB{A`ix8IBzx4h4`6Dxnlw{vgZW>XJ0+tiTvRM2|R
za+LMp4cTM9gr5Zto{FqogC1G}6Ww(-2)yDWVennTiI<d~Yz5SbqCj=!appC=m^jXx
zK#}y~E7);127ZlG&<r2yFtamo`+CfwJ&{2gQ}#cYQ1ip5Q2=_WU2j&c=`Z|!AYD;M
zbSbEBW0BJ}F%Q_`k@5!Kcck~26+5$H{A%F6$i0b!k{(Cc#<%Se;aI-iV&@Sf_SyVH
z-Qzc=#-;})O^Xu2UgidHRU^y{vw2{R)V%e_clKKD8S`xD&K^5^Ot3|UOi*L|URkEH
zDp2#ggAotpHGy@ui4NfEA0Z#}i7@1EDIkMys8Gdt5c3dkp=<mRchF@s`6zx0r``j8
zxV(794pK5XYl|9Y7Q<{hUjL=e>uP$B8bkYOwep4QfG87ghOvn7YN%D^p;O#e`R(99
z%_Q}QdVx`@udZUc3(@|nIHu<cya5lQ^TGDmkx4VAGzm!=yP)fM$6{??I8Pov)VH7g
zYm`rNfee{?E#YHim(Y44$AiCy+=;K6YuR0Ud%HPdK@)oK?D4+$2_6_yg{u2Lu^trL
zjb7|It0_-mF5L!KN@a0Y)JNYe4b0~vuYl2i#W%1Qd@JwAoiX42!RJg9J^2Lj9JBjS
zu}`dq8>AfOvDeB64xB{Qz}x^vDW2A-G5P`dptdfBTHXbEcTIf`+Wl+!T_u5Gs;Xz1
z>FOE$WGV1g9j7nyA5j7d_Dj)3B~TsI^}l#alZCs*N2SzF92*=rCcNvPR`E`uJ)#Dm
z_gpE{qYkAO&GDUfDJwRmTe4&7%d94gVz!*v9lP2o)y>Dd{n=)x2DP?F1~-(szDq4*
zg+IS%^yH7BMm**NxFJW#|M9-P#0$QFRTD+UU|s>LRuQ21?`#Qh@>{W-pJFS3BVB-x
ziXnn?@C<A~E8n07B;xjxp<MXthVYHuz<CPkBNU68S`~RZ3wIex)qqz6DN2Vxmudh9
zUI{R3HK2QU!&Gw~Sn4P(qe1Efuf;p?L_XUd5^*cxNz(O%jNpI9eC_7$IJ0%(ZuJIK
z@Hj_jqjbuToF?-wEbz+xuw>?{G8+T7cinV+)~!IFelwf?)&BOl2f;u68``wrT3ZI6
zWef$!xL6F6^T2s71$QPy73ihwQIA&O<4JIDc!;IomrfZ!c{bGYbNoKfgUZIlzlicN
zh<h*DSQiKLK3bPkBk3@H?g_xRQ}tN%p8jw;bksk<>Xy?T)j%k?$<T9;Q;b<bZ&Z8q
zJar$=j}&qT!q@><&;;mO!zciXeGq?Qd&!H)O#gO~&l7qlpPjNO^r~mgp5GlaTPJmy
z=URT;FS$AVagv;IDVHt(vg*^ScWWQ5`ZdEovDWZ>j^53e54=G4L(_C+%TK@OhNFDT
z{$XvPD^2F}5xw1HhUkS}nZ<wcEx_@EFn!Mw+r$}um(ArQEc`vVvOXdQ{l!Df=7Ysb
z{*%4q?{Jd*`j5J%^JpuS?(cB;5!6v1)eTHhQ#AtGS}t{^Sa|go!UN~59>Se;1RC)R
zX#3^V8L^yhA^V4ei?(5sb%X|90#zc+=xS7yZ~SjXoK2{ctdd;A$9UK6+o0{<*5AAI
z9N;o0-PS#zRm{nho6}Cr%$r#+r&~&rf99|bPQ@Hv^!V53XQ&F=Z#?svXK?}g`Oe)%
z3o=k`$MO#JLG<A-QEMK^g_u*P(s=0ON#I+`Neg(xL|z^av}U3yaD*3rqW@SsVCG&d
zobOSN+8uM3GrVI@)j~K|nxTtera{J9k-f9!TR774C=$xWCwL6kgT+tP)uGOOK!whQ
z8|16pD_aS7=_t;Cw@akc;u~Gz8GMN?$QCDt`@arf7w4ApGig<1tflUN8SMjF7wUAW
zPtW12y<GUrz{#O`5f4Lqn)${L?Ypa0SkrS|T|M^)WrQ!*7k%IOtY$mmr5X%gp&Qji
zb=ik$+Xl9!4eu|zh){VCbL$K^)G}q||MeCs1HEE5H{of?6Q@{zeAZ*|{!7>)V<X%$
zuXGErn;kKiu0|EDrFN-7h{3hc%$}%&aQ-zRUwAWW%JT5+f=7X#SjW7BIbefw1STw}
zi@`_GmkP*wn3I=^hS2)`=`3YHM^J2|5h5nr{|&Dd=apP0X>wG1QFPeYE(vXBwyD~A
zP#@di)$X51M!UNX9^Wgvt#8v6Esi<A8b33zUbqUFYxDEjXXzof%Zn_5zk(~Y419*m
zps$6%VHS?3x)OMjM0YTPRp3<`2e;2v=x_IVMgH8_z)JJ#VjfF_&v!mQ3)kUfzEV%d
zP3EXg%8sbLqVD0OJ=FlUl2*t}+DJ>lKAZs?8bjL^92sz3eS;6UxDJ5FtcNP0j=&@4
z40hX&9<T-#z6X4pBjE$f5kb;{7cv&GBfNvXSeVmf&neM~YbW=&kMuD5HR$NjdV`~1
z>!dcn+f-@o)1q+4FU|LKiW;14n-sDy!Z$z|j(#_M@3K1ToE(Vnype9fl~N8)v4Ua)
zoMt`IXP(F}nBy||8sW=Z0sr2G3fDj;!oyM&9cdM>i%2%a8McrOMc=E;KPUx<*%oEh
zd$rcFaDGlvrQomL3(j{lm4$O}IQXzFP?ZY7S$9&V>C@1am%stCRt?vk;Z4a@<tYpL
z^F2(9K4Ln*3Lmb8+S3MbJSNYzO)%Q>v1(b+&`GW1=O)aLT@=2WCwO#q{nS0FL-#gq
z+st(;+_9T;v0>wVg6(`_#Kh>pA1vGVjb9l)7v7a*4nHidg3sFps)bdS0fYVtH75ru
z%u4WvMS<O08ZK-kAC5^b2XlIa=*siMAK(H^94or<8~g(!c8p~J-NE|<)Vl~3=`#7^
z1Y>l0xH+HDCLs0}sJ4#KW{Si2I$xbcRNtYqaDz%ED3yL79+pr`c-|U7y^Di7e}&fu
zr%_uDlCJa!ZrtX)w@q5d86I*>z{!a``gZJ>*a|V*BJTSCvVQZ*9ox;rZ~PD6Z)#M?
zx~S2U&P~h=s3=xhF8O*Hj!?uGNf)Y%T$+yAF%R6pE*TGPDi#x54fzO;tLmaRo67_{
zXneH|GKOMK8;p!i04KDb2e5~DuMyPM`RoZxU_Ws#E4s`Obp+X%i>|trno<Z{43X4U
z-J;q=a5J=1JHVzqm++V4WEW7o#)6eutKO)m;Mc<ST(B{A{JXo+11^bKGMYxfId(|q
zQ$u+_R>$_pmTW5~e9f;x-y;@H_Kw{fmlx9{x^SfH#7h%{BNs(~jcFcxX;Pig&SsSH
z*zbgI49gJHsGk^2`(&I-lW*WrYfZnPuGSNqz=AqJ0lX*}xWmcp#{a6kal(*%qVR?i
zbp=SV6YjMrZnzuo#mC^*8}lJ-BPQ>EX{K2Pv*=^>|6GD<2voLzfn@8dyE31is*cNk
zsuXHf3tA2>c$W%N6ZC4T1jb-D&4&BgfpX<$+90pWB5<(Omv!K-xG3I%)440tp@;rs
zt7&t!Zndppp2E|9Gw5r0WMs+ct<foyn@#>2eIVL4d1mDF(DQ)<<Q-cjzYvSv_@8?_
zKm99w;r*^pr|3MbrcyxHUBn=Cy2tQ_gi<5wDRS9%n=<a&IvCzYAUL${;wpCo-ggn_
z*=PQWdGUE{KhOqaU0Aw~(gndcoQ21zh<=0W<AvG#9d!M=`Y3L(Ct`S#x=F8O20U&f
zQH37iZ(7hLRH%9CvnpyXpleVSW1);K!Q9vguJBuM&%^?a|72gG@;wA*v#}4>^VTue
z{<f*M7vin<v+oZ~4K5#2Cb(MYx*(q*7kf=ngWa_#tKDeDtH3FdD87JIuco%i_B0+Y
zkDjoT{K9i=2JLT&Tn{efKWM2R*=knWcx!Yp#xj$60}IUrYqW_K<wx0O)(5QkXQ0F3
zg0X?Dq%Ht2Xi>yzeN_)#e?8UEj9lQr2|^!HWPlw@21+QaTdKqG8oifY!85i1D;%ya
z%a1BhU!X&35*1a~DFQtELD>|FQ4Fg8QPzd8#4XlGRgC11S&*%g<-Ol_>jLX`n=ss1
zYrYOF*i6wt95#B{f~_a5{<c<ZJpY^dK^?CJF7p>f$@B6N9OQqfCwwtI<O?Y9-^2m9
zAo`Mz`~<DZkqzRtSQsnI7Vxt4R%mF$o%scJne8?@aUZ7nbl!?TV*}V?RUdA^fqJT5
z4Ufie`40MF19O{dKtJHaZw&;05$@Lpxas!l7d9GNt1KA_S5y_XoV+lxWgx$QgJaoC
zi7JV}YEd~%!bQ+^>hk%9#dr)C$71dQWL}>gvMsYVv0k#wxAeAF^NY23TlQLyS+Xqk
ztZCM3)_CJQd@P0J5S{^ze<IFkp+xw7v*0D%N9Uj$4@V^_kA71EHKc(0B%ku1z(d#J
zc=(Tv<C)OYE`yE#-&E?(F7W02D)SMKS$T1Y1smmAyp9EaJ&6<LlGHEh8E$MO97;!3
zjM_?z)gY)b+i4>z=}}rq9@w>+EBnHkbsy2&7<1hiOzT_KKrl}iXe>>{R>L4{P7DT~
z>4Vwnj3Kx$OXNE-D{e77**L?&_-JWhjkO-PT=UCkJ8W5H3Agn%+OvhcJ0rMvJ@K>l
zhkqytI#Vu?Q4}4+tn^-LRE<b^2^&U7pa52fGxZTIfbNBxhco&BH~4;0Mb5`W^Hm6R
z(&Bt7FmEM!nf*5W*lVK!%fqh75wO`WXa|^vv)T{t<w$r!lc>G6t60?$e3Cyr9_!%#
zpAQbHF?8|_@V`@`uw?;tbkP~`Ev-kcHU>L51X<)y3c9<&3y7~Qm#;N0vk92Z2eEp5
zDDyX_7(T`?>t|aXxOR#gZ>&dbosAxFP_1M;;nP?QJ|U4G;1j^BJ0mA2(FZt%X39y_
z3GCJ<ynh>ePK<^Caj)Y`>8Yqf*O9SaP-!ND+0G|@fQE1I8LT1HjPsmWQ(lC3W6zCE
zh9B6UG0GQwdIPXQO_6am)G*W<4{VXd<ECA-v#JBl*ct9Wla|R?Dnr@yQr?DRE(9Au
z*VHEMh%F4bdl1Q~sA$>nI*upKpKwPY^L=bE-)2M^>x||`fN{<C78x^;Z!%8U+SyWU
zjf`6Cj}goMX7hM+eg?BhH(}*pj5e$*RF`ZS1qC`9F}z;Pfj+VrsBbR3v*YL@Nt&qq
zWhDF+C4kH)%RBH#ra^JKBqMk%a9J4l<g@riV=cSEmb16U3WMOi4^f}s&$%gYfZ?2o
z+#Ia)<Go6Ff&);0&nY)>ynkdbY<Boc0bf>*=&3#nU+M=}L`O9a%u`#v3(VPjZ2ov+
zU!fG0k!^W0?#Gk$<mv1Vn`b*^WV2r2s0Xo%%+nTQTWqo0idmoAs@g2ZX<G&x%r>A(
zeKNe*ZC;vv!X0lGACQr|;8bs`oZyRfg`?&my%ufYW0TSg2!FF2MHTP_&dS?#9-Lu_
z{EJ5+Gi&fhe2nY{FT^%>iQO`083UQH-3CwOhg&_MejyiLBOBYpHC7GYlo8n8nh)Qu
zE2{ftnU0K{LX(vh`z-a5w<)p`?)?XROKy4@g(2JB(2-xzQ}}>h%6o7=I!I@r(Qb&K
znQSZD$>xJ4Naf>=VMe;&Tfa7b7N5huD}C<yobjFF=jog0x6azZ_TJjZ`rBqR`txMC
zv0g)MDyI5V5$%9Yog7U3ooSbvCWC37n2gzEH+*fuR0Gv`E;Nab$jVFnHH+Y#c{4VZ
z9b(IvMHDoAm=m7mU}GQMLM{%JWuTh*;`wb**WuW^iG90o`Yfh3fAvLm(!s#7dDv<i
zgn7JxlF&3<fTeEHPrMaZXQ`Q<0PgH0@WvMDiP@kyJn~9B1g_r5&tZ!G5A#zY*_CfF
z-ubTe&F^#7`@VM*pLRZ%e57xj&tzYV_b~5lp8<X={Ek^S+qM{9#&W(8__+$6SSjcV
zKKOqfRma<OAc=73VuP@0@rb{HyTci3jXQ4$=0N~4y<^jPaZGFrSU>jBXoFpwA;xxN
zrmd`Tfh<6?ZOJaT)ARqu0@UOx;D$;913yEh@rFYB6*%8b1=AAciM#xO&8?Z(UU>*t
zqmz0mUG*>d37z+|8VZ+hwtfuX<^P=9W3Y3zpFQEPggYur3lYOMSpN8Kn~>wP-Otas
z!7HmE{U2&;m}p~Ew=MKd^Bv-S$mfk;Bg;9PpJ8F4tT?ol<6;qf;4^8XdWOos5pi>e
z%F+qkYAWXMm+*XgLrIo!G~{uU%|JI^iTsQQ>lDVF_&RpNsLcv9ccZ1zQq|T+X{gKt
z`&E(Z%A$ytyFi=Y!H+$cSwIeV&|}Ym!B`75<}e+WRZ;03p*a<T=QtR>UD0lM(+oWw
zPW2|pp$IwvZmAA+z&?bpe5q2A6Q4yT-o=vU?dmhaQkP$`2ZT0>h!3OSyughCmHj>J
z^VA)&!gj%Dk$0STOP@50m(4U>d0BB(PQ<KMOD<E`w!<tkRCiMwu*>im`r!(A9<sQ^
z2G|E=Wf@UeaDI_>MHD-UJRXWO=kU_}ficFoZQL`0*;(3#o_U>G!r$MJ9@1*r2Cnb{
z@Lm^z_o^5@rCIRZO~d|DYg!MD*bZOF-*N@I&OSPbCtMACQpJ=Vo)x>=2yAr<Ur`=A
z79`$?7vNgPf*Ie&%NtdEhxqjI%@%Wlc1Mkw>>1@0d{;VKo_Y87DeC*jQi@f^-8APl
zY(so!dKdGlXz6O41e)6m_ex{TYSYkhR)Lp{KqQA_)8Qp_-Wb^#Zmj3XevbXkN8&ge
z&x!y=xUgKKCBMTW*#+YO3p7TuUaY%u!L|fQ@FODes2GJEyaAYymSV1UN532d-So1U
zglZ!xP*zcKvLcOFtB}h}L<QXZ8}&d2V_WqHHozL9!z9BG0xnv8LEnvrtFZ`rt*O80
zExe|6v(HgqPosLk>F6d?*2nY<?Bnx&*rk5=oO|?|*r$XGAGm+`pfPsup|+3ww{5&%
z!wEgTpZX>lZ+MEBFB*!w;vKMp2e5sXzKnlsF|7l3Ek|?U3g{yu`F(L)<RWXY^KRUq
zr5lN?EpN^qF*nrR^Q;A1$CSYh7kt%C<Rvy`u%TItgxZRYHxB>xAt*G(;ZJFfu6YsM
z#v1s}CsQS`*vXhGzEG%oLs_WNeeru3x~e;zX~WfVRZ<s1-fxmQ=6v&`zDd7q9erQ;
zb(A&2_fNT>+COTy-)7f@PAl5>beh^ByQ|?`y3gOeSM{##vT4x8(fNEU!^c|7caK-F
z_bTfbqdvcj9_b?v%h~cKbj;R>qP3`@)uEIHz?%{cPV8@f)F=y8rZYdrx`?){Il9|(
zR*c;?)`MpW0bBMO{WQmD1CIBz&~TJjrT*|`6a;UC{Y^0U2eC=89-3o8?0%ElNprxp
z6a-%A4xju^?6W%2SMmhPc%&v`XYnceZwB`KKFVeChHNH#m<{aX^*Z5h+wc3qdfCpR
z!qT>+j}9r}{;N}Z%lEBHb}HVtkz2`e5x&KJ!^c-0)o$RNzCT?WxP^H?;kj(O-`oja
zKILtzjar!Nr@?Ji8v7R`DN`k&cg_P>wHat=sMyMT@Q3Ui81Hf7D0{&EVrlFF@V$*K
z<)hgeRuBB?T_eV*25fQ&2r`w1pzrs_hV5`*geUT#N<`hANoVL7ChrI^4rgQ=WZ-vf
z_tc?b(2@7zElmAK{{Szx(*i!{)%rhOPoE+VS6dV8hutyPn^w8Uua#vV3ka$bcOdgv
z+DGN&TA=m0X8GC`>@#5WFy1GuMBJF<uBiv7l#TpG-8>$-M)W%|{F1lgU06-u0zM9w
zGR7=k4V9)SRDyi)?WI#4^n?}2Ef;9Vuh@HRR6zM=_YFtBmMt{KV&7t~(VkD@msokD
z0PAfO;!cLEk-`?SIY0;(a7)ujQxBkrRNUNpV7lMP$9r&Y9F#6<o}3I{b8)gEGWUqn
zi1MmnKaWwo`4}E&51cSn9mD=&w2FXE;-W9<ZT^qUXwk&B%CCy>3ZrC?oUUp8d6(YR
zTR1hJ)p_Kwv6iP{xl`U`4xBY=p6~qT3meXxJoV_L48PKYW#0|Mt$xKR)mGKJh|gxr
zAMhe`*bDe)icmeo(p3DEg=iiaIrz`9ec%CvUzzn0TX3JHp;7E*b{50;8JE~Y+i~X3
z?*nW8GEVU|Mi%PWE*%4wq?x{got7)mk~UI3=nnq@t%u|N8Q{>KU>>}H4w($MWN&4W
z52yi+Qnk&~+7)hvh46Vd)PDLdofR8VgU{M`=_|aOrLq-U@Dn4_>dow(TE{w~+m)sb
zT7ByFdc;CeAgX+7RCb%3iPK(Y7N7PmN6jC#WYqkpQ=VBixW4ZDY?O`dr3A|XZ%4~s
z<EycrO#!<-8hYUkJo8LAJhRca+KNmA)tQB`TVNLkvv@WP^U?=n4!h2ti9A-6IkB6p
z3bd2{Oz~N)C@}gV^#ZR*@He)V?qf!1q>4g;NJU;uMpa!$rQma~jm_;*^Z}D@JYJ{a
z9(FE!s>v$GoB(fKExgjhPvETn%EYs{u2<Rb*`wuIYiY{|xoBed^q#ZQQ#$%J?7XLG
z$<|YPe;$2YZJ69NB_p$Hrc4TrF{3VqpO5lMJTPnj$~lXl#@!uV+xh%}#y**5Q8Cx2
zi{(CRf-}rVkDP<*8Y{-YbNUY==Nmk}C-@-#lFeckb`APyHV?xbf0R8mvRF6v$hgf`
zu%AXnJ{)~!Gs|ae1Y7+YJK;+x9XxLh@&FbIA~?i_fFJRqtOT~&AKm1ubif_gz}C+l
zk&6f@ZT_R$nENorr|S>uFTE7ocZcLFm1fWKpDVlC_E_N1jBraUIqP)lQa^`|cO4eC
zso8J%_(|q}(R0(>GyYCl9Xljsjt(I!#i;OrMR6;0XKbiE_X#!WyS!)4sCwY&miSq`
zOR$TGzk6&p=59AA1v%n`xQ5<460c!!8{1FS#B;X5u<_Y^4*uV5EHr8(n(OfpBc8or
zoJAThnIl_dFt!#-eg*jH|2Me|qE(3Bc*>wPR2bdT7yi*;s2+vnK+JHJ=@q=BcQN}f
z#xCDWsKW1csC_y3hQfN9-j90WMiz6EJxiyupOz}3TG*+S%o%$#3R-<SzHidO$=M}n
z{9*s;lN+X&&2UVvH>pHmo}9?Kvc+%$J*3aU|4uKm_1ucqQ9lQ_?Q9=<Mf%y#SQh#e
zwneZPwoq1yzhOn74F>>MKZQ?q2$;b{D5hmadrU6*z`Z)jjVzB{H6HWBMssN7UyX2<
z2KL^86~Qz$z^DOj?Him^Z^3Xn%XRW!@F-WQA~LVKik3%#qvvBL&6S&IE}WLb|A(LS
zRn?p3;>nbQzPJvXd<%6KHJ?gSA9KI`s99dLvNhs7=x9QRocvQ~s_Wgj!{FA_`c4`D
zDR4pD$n^A#^$GPNT+I)LVY|-vh{5``{}849zhxVn!`9!7uQ%*jhnM}LjQjqDjUV32
ztOt#K#z!N97myW!DO+NTCYn0^&(RB(-ULJSiP_-b{mD)vPxrth&<Goh8_-=^urW}f
z!ZG(=VUt*Qb^%_TP~`^3;I<+#^#bfcT{!BYl>(ETz{x5=6=)+n(n)MLJOlfZsRH!>
zZoVvZs_$@Q{8Wea1MtIhRJ{K_`#p-n{2O7P7M(BqSI)qRajy3qrngM*9XqyLVEmNI
z+5NIb!s76k@*y@%j=<+xL!Yvr4j8Xa2lSosU`z7)iV3Mh3p#b{-O<v*f01phx3V+^
zzPo9xg#vzr&q6-!=C`o57t9i+9Ui0?d<xXA?xG<40Ubp%bl0P-4BQ@1Sxfc<nL8GY
zL!hyUXBxlNAvsvprHjyFu2Bm($~ve3a?&foXP3ompX7ssKUc=XgE<g8DLoM1<M5gs
zmDCDq32u8bxZhlyd;_MsANsMspIJ+OG<@Wq@ENJ2rcFrnA7^iM(_u@O7H)k+P~5|5
z%X4-nw+LG(n;I#8iPn+E75)R$;Y+iqii!HZ@ZDCgRc)i|3>e*ObgwR!1OBh92gbWw
z1iNg6+WulIp%FxZQ9C3GfMfU#{5MZb75#af*ansHF7VKNeuT|NH0SX>24|<CvNq>S
zjqB{W?Kzukdqj2ND)Oc7aKhE0RBXzfAt&5WS=C1l0XmEX54H)fD)C)Z!$#9OAZbNA
zFim`+k8n0D)`OteEJU}RukgAK_H?s8xVifNk+JdFeY2n2-}W5Q<YBuc*F9eSA`&ti
z%(kY_n)r*ZST9=+u%fo9)>Hf~#oF<TA0eeO@2{iHOXtt>_wDK1^Hg6q(cB*G*U9Uu
z<tg(uF0yiP+%$ntrxP~GD$8I#9=t|2Ua8_CVs$n2;XpW7PlywIC%iwK*bi2pb>Ias
z#Vq4LZ2ubVjRbB{1rUWUaHd=W^VJ=URH`fu{N5NnWfIhnGxS{a#jSsYt9Aw0^qJ^J
z1C+b=qzttftnhfmLK(eDV&l)=-@aHKXPac#usul~r;bR79{0<!Ym2GQy+*E7$CDCg
zMa;;Hy=}(WdV>p`V6)mXtwHRutfIGse4JumG;KrZs^O_oo~}+sT0I^LH<)(u2^qiC
zdKHXf5L*KkW)`N^8R&9{<S1-H?SUUO7uxV<RNzT?1&@4uDU0HQ&1S=m`%s)dvBO{m
z9T5?$*x$xyBL|%ILCOQh+DJLr?J0oUDT#^q6(;jsXu$sbGCu~#^lmu4(|`)w<8<!e
z^813LSOdR69(=wV;iuc8Ti~P>)C~TQ?lS3JYD$JzSY6j4%?GusKj4XH+pr%~FV8)h
zo*3f5MpzTrDI>}D%j(I-ibAAAN+ph-8?h>R_2C)6>`(g4Zc?dZ%J_`|St8P_)Pxeo
zO{Q(<c`Iz74g-%|PX0kA{ef@(7QAPS7Gh(s8hp$iVyt*#Ok~OYg>lYk2+!yl))2~T
zq*0ZvVoz+BY>v34VrnR*!6DZXOk*GL$5DvbgUC;R<mq#;ZU!~MR^%NhUyZTV7XuFE
zzVw2!SslpiZ>Ts<i2SR%oIS|=TP~E^zfA1L^nX%ZsAbQNT|0JdIqZ}7is-d-PR|>k
z7;lcSb+#2_^`S`(gs$I|4h6X+-ktkvdC{e#ray}~>^8VnrH&_EAB$fBmn~}CTd!Hx
zV}>2u2Zv-P@}@bql~>CXc<rG8u*oLebZ}(J5V+@Dg&VenR%1eIf(q1{Me|n7gD+xR
z*cRIfTWPRY?=fK?SGn>bxPsZx3Ez{0dI^3-$yKxxo{)N?EOec9@c6$Fxj;vU<S}e_
z1*_@6jf0^JJ*9J$0JSDV&#@miEpmfs6tpC6Uz&5$I9qzx9<9QAdJkXV_aJu9{J{AG
z;~on?>my^I^|K|+2oqIlK=7W#zVr4jKeqT}R?EmI<34rV(&%=_w_Yl6f{yVP9{#@b
z*$`HMU&Jd~c*_;yEpl`fCZ*2uBXEBzFmD`xgjaCb2WR1D^q4E)R6UF-Y&D;4)MpN?
zB>HH!@qraKJmF;ifT;vJ3-r=g;N}m4Rg!8rSh%K$7hka*ZnbT2L2pHF90GH*3%YGD
zXyH@A=oiQ9?tH@J7Kq(M>@@oCH227c6c+qC);;}Wysvd|S6|1>o|lGp_h~Ze*X%;`
zHcXkW<7`)qPR2&tR%53;5*m>@Ykr^QZi{MVb&Y84HKA+mMu%H&89v58J1D?t>zOpZ
zhIN>6*;bcbmJL*Y`2;LwY3%PT!|Okch8wU0oEwGlGZus^zdiP<H-Sg$ie1fL#xM2~
zd&?Q%wV!}X?+O;Qu@MioGzOlwH2stIVH@*4wW4a^Y1&f}Iaa>M7H^0Q5f!joo&wBo
z0h-xDTBVSeG^v9gNilK;?$lrRw$HKW>BZnJ?*#|Oo=7#P_^__M`Z;#(?l35HtXt59
zOt0DN(wc{U60;3qwb;6<2NSDgdF6Im-g{wI`kb(Do>jW#HD26oh-)Tq8*)*Vo$!0y
z5uXb-h8guHJdr&xJAISGkX3ImLHAT{Q1zDZr{b0P$$!9ub5%~^hu9gQ-L1Gsg*mmn
z$j>LT5zG!IEx?$}FOaLMh_10g|E8WgLEQvm7!B6m1Nm82p5=q!P7TKfP7EEFuQA0G
z!fP9x$Bo|yc327T_D)qCyaRS^%{lhc`Z@QJlL9VB|4P}Fa6(UZUEQWyr`!GNxH;Rx
zCtsiDnR7hxLS%GMiNNup=9IIuPOW;qCU*Y1v??J3JnMGv(_~50%6&>0M?;ncjI|!{
zEa&BA-EFIFtBffu4LjtUzyXE8%UDq@0R~@;?Wj~<2^`W$YytJ<JJ4Y&!LfV{b?72<
zWnWoiR*G*lj_{qvUToP}*%fd;|9f?w;&{!GyO^~P$#3|c;CxZVn_|P_6<?11Kth%K
ziG3<x=ry^(6wc5Yhsc9aHeSQw(+<dLhWQ@PXE~6Jv=5K`o;V|UX^8j8%N=(*UF<%8
zK*;F1x?JM(oar<8w7|?xQ;pe)ONOpnzUI+_>8YcFmU{H>UZKfmhg!Y6Slq*gg>+z@
z#;+gm?pu^KH{PT3f00U#1Xk-xZgPQ~4aOF88`Q`q;8e=<EO_f2u|HE>v=d)g1|JJ%
zyFWi-ykL)wV$g+d@o!+x2Jy8-xTQwmpC*GBDWKlSuVCqyz=O0OyVzOsCm4(n;mo7(
z(cRD|f{>l7p)KYEM|Tyoc{x0rc4{NkjqP|nq+xh<stxLU_{;>ylzCCLt?&9NrzPzj
zdnFBC=#gOGG^N}Oj|EQ_4O<qpy7HQF%gfDLKP5fj_}CBs*U@=L*LeSNymk=eM=5Cp
zt*BW-Yb8c#MQc>V4r$zbpYM8}n+O>alGrh7MvOyjKP%c&s!~)`YotXLwOXsjPhvMJ
zwEBCcr~jzq<lN^z-|uI<S2_+0dZTIIunnU(CmtC0Q00u=HX`}sNuKH=+A0)VaXvL<
z+O3FVak<VxU(&=rL7q~7-sM{Pnf8!i5T{~!8<(W78Ut!J2Bu`UI71zlXdrG_Phe$2
z#UT46j7$^fGtTTT(}F#w4^D$rwBQA50aZL%HP8a>a~=%e9#aMe=6BBeK5}nXfr%E;
zRefL)Dxur%h87@Lm4yMj8t+XD%uJkGC!ugiVo0+#{n|GT+tl~XsIsx^Q-7EdJwGga
zM%KLrVrJ%q#pc_HW9@4NEo!QJ1iBX_Hca|l1$=yE#L!U#-Sa?2nt(XGKu<ORPn$2g
zqY&Lx!M{^2KhqhgJ36Al3R3|jer%_ME)%mvAR5qUSxH99ezK1^D)yo2=q-lB(6qPL
z^O;98$xj8*^XgSN+UL<def0mRY1=vZ`&3i1P|{31=wS_191a<D$z~LM#WB>{<(c#k
zp|n~|kMbu&q!%pNcyFZ?-ziBmzRVb=oAkfcA-{Fc_T9ruKKyQw>*Hn08S`yI5brL-
zxjyQP@cF?%2K8wf`~D}Mxk<k!{l~5ly=-{y$PZk3R;=|GbKG5-4l?)?JI@)EKr3|-
zYJ>~)oK!l^SG<ee<lFs$LnzebqMI2a=b`hx&1U+iwVU~50GxQFb;oL<E{bQSBD_jF
z8~qSfwNO_>)AR<}jkDEhoEU+qsVc)|M$lz?q5XKotXn|84RHE;gQ-kIoohV#2r#A;
z^fZmkQ27&mtg&enpEU97X+KXZO1b6U%D!2zbyT}DA#>jw*tb$-!LY3(c8$0(cxUgu
z9e!?hFQ{zWl0IET!{o^1Dt5n^>LX^4Z0o*5Z^-ua5aIOl3u*#M4G-ye71c@E7CfXZ
z8r&!|1(rCQ`KSz9t33P@1LPr|u$(+9Z^P0IWFCB57Snf%*yaX+ODxp;ISqkktLh5-
z;AQjP2*Nm)y`er%hTbSDx1$@q1m^mR_A^7+T@Km%?43>lPM~}CU0CKjY^L9UF9n+i
zWMDS*9P<1qUTD|2eQ9E9_T;z67rP(!YY_5%8~OIm;Eo+)L&|iy8d|U8w&0Rx`9aYw
zibLlNkxqO{`;;%dp`(|M_+M1K%UA9fGprq!AI#ZWbl6+aA^oj$bsp-^TKs7jQ(PfB
zyL)Kb9-t#DA^+ee+(n^04^DzML_Nh_|44jQia1y^MO`@%R5Su@Mua(_j=@p{qbag*
zxgWyiwH6M(3c9gdaOuJLqvwOl4Fmztr^1HYd3I}WHSY<W7kiwa>`HLDajFxUD?Rlt
zQB(AApO%Lml~^vV#?%EF6UWz9;X^~i&vkm%=4Q(+onu3`bgtL2d$X+8r#o-zJI3{C
zVp`hM6u;QhuFR2>qK~^CGh0mel-3dl-9xbu`msKYbKo(|L~Zk>Do1+372b+LyX(RS
zGzYy(7gQtFP}O~`-o`PNqpE^AL|e8MFS>{r*&5!hP_!gh@&%j0G7?!D!ZvrKJCA^g
zX{O&n54-@a)_b}w4(wM{Z4<_SmD$^0I7{qu-hA7~`^-B5cH$ua_8(FQqG8~6sXC&%
z=f3BZjP}}zPbXfT^if9t)UNT{Jtf0}`u^QxN0%*~+l4OZwz_x7pq8#PW3na`ryorz
zio4|=8vS*2Cr@YVEpg1#i(RvTdJ-gRDhECCKt915I!wG?OYYK8yF|I^4%<MFzJY0d
zk9mB9?5L0PeixyttEKYMRlG!t<q~T|3fipS<Oyr9d7TOCF_U~(kj06pB}Q>di#fZK
z;5;UhC6%f=$$VXvsk$P3=?b)TwVVpx8r~Xqmh&sO^iQ_%7+iHv;5&cehWlId-4)5@
z*eA}}nPZov_@%wbsGsgPp;yumv3+HKmtTy>^{)8SyCz}5gzFhI(kmtV#%*+;i8(d;
zOHV_vkx22rm@FHp`D{^r<s#0apUNW*AO>vbk=}xm%nx<W6MXXR)m5}r=kbhBQ-@`s
zK1oW+KWJ7qsi)v*Db_FW@Z+pp%L5zbSgE9CjW@P_*DU6)oz=@o+d`{FkGYEGWhMHT
z_3AnL-A;N3TIPc$#4%2FC)Yj)`?(ke@MRLchQKziL`hqTv_=bWQa8)TJ!y2f`v<F;
z-8kMid3tJo`pQWUGv3T7o;Y{H#0jAjthA8yK^aTZ`%idv++BOWi1B>q8O5gzfc0-L
zNSEY}J+s2mA6G{q)|~s~!}c)?ZFpTB0E3dOv(aA;Ruy=wF>HQIQ7o=iOXP0d3zb71
zboh<MBz{H(5g;yEUQy`TW@+>_lT~|mp=?sB4l*hDGb2>fx9~a!$UDqD8}T;wr}p-T
zC(lLe+t7Joe&>cRbBb+$Z!1$Dhg2W4jvg=w4aqq6i?VXJr<Z%|=wf$Q>$0-E<;Nu^
zKS)VUi_N&7acJ^|^v4q(Pbg05Grr&0hu$Ts&>Cme#tR&7df_vjCtr%LYA`<|iQ0Kc
zrK00<k*~Q6Mrfwlgo`hiin|-t;1m4|l{KOi69b3XV`5Q_cT{J^Wl@@B^HfM{h-@Ny
zikTu%eq!|z4t3}zb7Up<GCvrtBV-f4FdmhH=QtAw#(7y2HSG+%td-ayX4^-cDA>~9
z%?4+YH<WqfAk*MoCW;bR_-k^9S|`e&e7((9c-p<iHOsZi6Jgy^JMFG<>&6~S*qrE>
zydkMUVxzI&$F;FLs$14%&sniWMS?VM!}pVb;;I<cZatJXAA!;*lcD#$4&#Q6GA(pF
z_Q>sMmWS%CXwaJJ-6$dRU=Sntd>6nCvvE*o%P<&mUl}f*$T;g$>!p<;H^3IZYTZR0
ze2PioPt>Gy)K|RIf#jaG(IfF?#Dc~}qLrP-46z)gMqRUl$*+t_GtIrNaJyCZmT_*F
z!!X5Jwqyq=(Pue<Z_6A`%W~_vRm=UR>m65=doi{CjL23;$#m-m8q?V%ksSFgYK8--
zm4c}H*T@W+rIxb)R%b`4p#rs+xA`$nQn&t2r|H2c3-(g^a`2b#0Rh>Kqk0{9eJi;J
z7wSt+?q>OfywJla7RPf3ydsbu&`#V3{rt;1hJG^3YRfElh^KrVj=dG?$|Q{*Oly6G
zP4qOmAVG2!JAzT+D8Axwur6U@{4crA0cb?efa<rVE=AfEaZ>rBUKtFFnZrCb3ud;8
z<!d!?`@7$BPj|0&KXxs&+FCuV1k^#_hyWPfZB+gr@%C(B`t%3i*kDG(F!e=EG8=uv
z5b|ZJsD@~K{n#hAp@#W~oXr7bT)nRk;2fDNvs7Pjqz&Q{cexSFo4@J+qQ4u>WTgBa
zeqw`YMI|b*{_vC)wXJiO8->I*QeT76IF;tPC2+(0kz;e2i8>VQ?K?6d{9!3qgI2A?
zzfnwY%`s$PnydC-=BTMcU0P)au=%w!38e1k>5oOE)f>G?1`NqB?k3i1ceZPvJH+kl
z*>3d&xzeEV_rx>i)nHEIHPxDGKa3QfDdr6SU#bV0h>xOJ&L=x&t?G#fJ%wr{>4VQv
z6hz}#TuJrlsLrd$urV9eOccwt!F*51cu|}AezmoT?Q}7WcoABOaws!>z#((3{^TOm
zgxRS;4wlp(@pG22AuZ$`Hb%J@DKl~Wbf+qe#8-BK%#R(qF_YtKx(-UvWOLE(;+!Je
zY&wdL$@Vq<Bm4PnP}ehPKL;>-wver?E1naa)7P!8)<E&d%7w{&jv{ukTt|IKgb67$
zzu^)d3}*FLX}!@kz-^M%Xz=yY<h@?xW-o;!@xb$?;1wQ;KKV--rIKV*e5RGbqRx}w
zJ4ar!DylTOS)3G=WgG6?6Pb$NVY?M1;!t$&L(|cnr1DuH#tT&ecQac#W)TX1U$kjw
zdCML6BqPjjPD@i9>049>-rGS{ANKzm{idbe3{}*(_=B3FeeJ*|=L4!#7X99CsbmdM
zNhuRhoGyc-K4WD`f3XwXs|UFWeer^PrTv*62dky1Z}4dF&Lo+;OL(HyWQ=^FI)J0s
zHSe0eoZ%SzXEFva$O0TCO>pynsZ!)c5*1!!0`yUn_1`E}61d+ra2Icc@9!>t7dN?~
zmn?sI)jDcjw$^zzq5KXdZzKfY%>xt?XVIG8P;<?nG812`FFALO>0;;jF3Ymf_?Zgk
zfevApU4zE#4%)jZdKFkp9#~<rd0^bIG%HX-&7@}kChy6!D5u{Q^VurypgtJKUP6u?
zsbW#463;V?YQCDEUe4sJ&tZMLnNIqoUWV6dw#jGi8A>N8r%p2WCE>8BgKMUra_}>K
zj&|*wkW4l|%QtYuhwIIxs(*+kGL|0oh!gw5%A*6VmlxP$KCzC2nvJx&n(`<#a_LTm
z`hVQcPWlCVNg7CRj(*80>CMx|lFbu=ul{Sf23>dq-hCo<@JA-?5HS3upxbHOq+Ld!
z_TI`#i-X^sO%JZiw_eDW5DV`WB443WEg;1xi%GKz(|RS&X%978-(g~TpxT4s^ns24
z9UaPKR8=SW#$8MV->F=7jsxUZy#jZ8N*1d?rl4STpSm_(?Nkf#biIWye5c&St@%xQ
zP_WFlcGIP%iNW;M^)lI-V_mgk#IJacZ<OL-BvF<y8-=5CuLkB&ixh~?)a)NoIJMNB
zQHd;KCy!L!ajzZY930fE=r41?h<`)p?ZQFd8n;3-RFdoM`<w<3_pUo#ZaJSqqwvgx
zC7LXokQmsUjcFG3;9GFLKU9J#q4IxZ?&&EgP{)A8^|Y_+N1UHb?lD<rY|Q64Zzq`l
z_LG~`4c5X|PheF0frISD9p4hg-X%1)7iAP2&2m_ZYgPq00-ft7@ip6dwrEF<`b<1P
z#c>1ANNGM`6gYSl>ik^l_e@-3>qR}ePk+js|0}7RV?lgBq8bYJ!m6n1BJ4KQ;HEIE
z+dzQl>EU)hN|4%i0ktv>UFuu*ew23^_*#zA&kw^Yq_Yn;&{ybH!6X_DqVCSYulfk>
zp@(hr7v5bS3cc|#v|;E}vUCe`L_X&I4y2Z}(sN+OH}P{W%P{dec@_aY^K|JbKhRJq
zD#6(dlfl%&<MLGzBb0i_DuPcP3$u6_6@Hj?5oLS7(w=WFQ17>sG318_Cx?F8869Uh
z8vZG2E<LC^EZ1zbms#}qhM?Zta5}k3WA4t3|4?7k{dxXI{OwzCZ)v)wy@rloOcHnv
z+_^XD%iBounW8>|Zy3jI9OmSJK)K9MI07&8gsZ_C@ALg!HjBV(Zu8{l%-8yqIVa1I
zHk~d8(SJhpWO86KVV8W=D3-d{WSWX*S2N5>h4_Fvs|P|+ce7yr4~PWomNiB^uq>2n
zEvykJb>ex44@np*;ydoor&`E#G9Q%f5<kI#lhoia_4vM0=;L*nw2!bMzGj9iKhXY-
zu<7ObOgqt_eFLX)&3+AhCWn93np-vt{;m%w++*{VewF;cizGqslK03~e-%wkSEinx
zjwIdsHnq1mI+?kqg`LXX59V)%(AnFP!d-{%9jBuBG!D4JIcelP&L~W<`V#l}eE7r<
zNOU+yPwDq>%E44P)FScHQ&aX3{l#^UVY*k=A-4y$*%2la%Zwqjt~}N7m~KhR#xY#i
zzr#tcRz>*2S~K(CR?kFpbe9L|L;MB<Rmks!c!KI-EYCW{)JIKxmzlx?8yT+~z(~&J
zw(ddk)(NFYqUk`T+t2MRL-yx*xPk#rJ@09fpQk%Jyfdg|(@+N$!)Yx9v%ZAqD3r{#
zr|NZjsG_$GqN_hw3#iWT>!<Q_T>t%K1va!B%x<}KoU$<dS$q#iz*4qTZxdl3A6tvj
zm<6JQ-(YQ$zG5RM`6#&GA(XSnz^u2UVaX>GYYgAZ3;B=ulxlJUr@}~Ps2uk7_u1`+
z!L0Q%gLsGjsJNrdD)S5tQ%l(YH6*h7+NpLaf1a*Mzl1&i6qS4ouH6~rx}BxUB{13g
zqGb<2^HPgUrl)8PBb*0db<t*>{RE{Gdm$)m8*c47eD2Tq?$elEno-^I*dO9x{OeFR
zQt>b>6qlI}b8+Xb;LL7C?f<hHE{~(jIgbv0ruBstA#O4sdBt4L@&$1lol$ir?uq()
zbyG`bu_-umD^tmysn2lvtmfow#hG#ezttK%DnsqEDEIcVFRn0u=+3;Mt4tC8b`$3<
zIMz)hV|C*#-GM!+Y7ayooeLLo+gKnKL(OI0Zw(S$x;q~__wDf{9&dJLI`f?m&@hz+
zp=?45>H$*>HrbGjr-pV-a*%@c4wAH)OxYd^z);9im0jpbzfy0n$lk(OU!w$whOsyz
z_o349h$iwl)7TmmN>e>|M14_SRTW#o&m7)~W>b2C=eixpX*QbNSJeXgNHtw3o3b@l
z*6l&{Gt6-PH??*HeLaqzR9_X~E1$v%J*(2uMxg6OadQBdLL8cqe)Rq8Ahc8MBHSkR
zyl#9N&vCF;_%~NJz!~d2!~faMY3mf(=betu4)YqnC7?ewJRJYk20R3xF(o`xaX5ze
zDmVM%Q1<RG*jZYU2r2btE0=BZGKz+SB#~4X<4CSph(n_rZ*_*WP}dy+W%!R6Wt~Q!
z^H3ZDm6}QAi05}+JXDoIcw;%Oo54KWf;$e=5vafS&>03%6TF~Saon_gy$CcW-2{QE
zNK~?^+^sS800$)>YRN}n-97E)_Hr=Q-ela)wL^JEANwTu{$uXpXuH^Cmi~Sd?<qTq
zWZY}^8B$#~@l9pghjCSXhv)S!j8Ii>Q5O@&Od#<P#IPUv$hTA@e5M_7nDi2UJo6yw
z2g>Cq{q=p>)yfAc+kuYdC4B`3gT3?<tAaRgo$^#*(!V7N#6r}{D-@|ns=2O5#X#ka
zrfdN7<TN&+Qv9Ygp?@{$j8{~9Jq^C*I&ah9J~(LSFsbg;)4{Yaf&pJ4>+BCSFK10F
zr;P2#(*|lEXARHT$a%>o(AY`EKlhU7uk6@lULSWdo&UnrdN~8>;HV=(or~BUtLkX`
z1be*?9wQe?+TBP3$`<i>lpBCsC+SQo&T|~Y<CyYWqig92JF$*B)k)S9Gi47cWlMQN
z424Nev$BLQjCdI_5?-pQJzWQydFlfk3WrSz)7k-i2A{KYD>$dMa4}l11Bdf9tEddE
z)jItxoibhxu?skjeaUK@NC(WaSMt0Oc7}1BGv<!H$*v14o6cQ4X|D5|d4`jow2a)l
z#m-4Ms{~S|;C0ycsyVyhNM3@>pE5!G{*L=})-afoMIfH{nBBwq#)rcT9hb+|blH$P
zq?ooAsrR^Hvw6Y_WS$0y6{3hP+7?f(gC{6d%(hCHkLHT+QRldX0N1W;?XkXL!)?yH
zO0?Un{q%yd^fJ(RCV@SwCY3W3+<KQbn(Rq_{&AjpJWqX;T(tGPzh-D%x|&~b*d2sp
zZihapJXr!Wc+#GB6qTzx+)WSX5SZy+=ZSOBKIV)-M<48LvWuKZ=d^R3z1#Kgo{(rp
z;atsv`CFzPZsSyH&jot?M4aK><w3gnS8TLPz}YXt#pJ2;WLA`rAcSrXomF$SKui~D
b;uUcG$?V^|V2WGFhv<6uf$AH+ixTmFYAn+m

diff --git a/pcore/traps.py b/pcore/traps.py
deleted file mode 100644
index e37eb925..00000000
--- a/pcore/traps.py
+++ /dev/null
@@ -1,480 +0,0 @@
-"""
-A set of utilities for dealing with ALCATRAS traps
-"""
-
-import numpy as np
-from tqdm import tqdm
-
-from skimage import transform, feature
-from skimage.filters.rank import entropy
-from skimage.filters import threshold_otsu
-from skimage.segmentation import clear_border
-from skimage.measure import label, regionprops
-from skimage.morphology import disk, closing, square
-
-
-def stretch_image(image):
-    image = ((image - image.min()) / (image.max() - image.min())) * 255
-    minval = np.percentile(image, 2)
-    maxval = np.percentile(image, 98)
-    image = np.clip(image, minval, maxval)
-    image = (image - minval) / (maxval - minval)
-    return image
-
-
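Despite the 255 scaling on its first line, `stretch_image` returns values in [0, 1]: the percentile clip is followed by a rescale that cancels the initial factor. A small worked check of that behaviour on an arbitrary array:

import numpy as np

image = np.linspace(0.0, 10.0, 1001).reshape(11, 91)  # any 2-D array will do
scaled = (image - image.min()) / (image.max() - image.min()) * 255
lo, hi = np.percentile(scaled, 2), np.percentile(scaled, 98)
stretched = (np.clip(scaled, lo, hi) - lo) / (hi - lo)
print(stretched.min(), stretched.max())  # 0.0 1.0 -- the output range is [0, 1], not [0, 255]
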
-def segment_traps(image, tile_size, downscale=0.4):
-    # Contrast stretching is currently disabled (see the commented-out calls below);
-    # work on `img` so the original `image` is kept in case we need to re-run
-    img = image
-    # stretched = stretch_image(image)
-    # img = stretch_image(image)
-    # TODO Optimise the hyperparameters
-    disk_radius = int(min([0.01 * x for x in img.shape]))
-    min_area = 0.2 * (tile_size ** 2)
-    if downscale != 1:
-        img = transform.rescale(image, downscale)
-    entropy_image = entropy(img, disk(disk_radius))
-    if downscale != 1:
-        entropy_image = transform.rescale(entropy_image, 1 / downscale)
-
-    # apply threshold
-    thresh = threshold_otsu(entropy_image)
-    bw = closing(entropy_image > thresh, square(3))
-
-    # remove artifacts connected to image border
-    cleared = clear_border(bw)
-
-    # label image regions
-    label_image = label(cleared)
-    areas = [
-        region.area
-        for region in regionprops(label_image)
-        if region.area > min_area and region.area < tile_size ** 2 * 0.8
-    ]
-    traps = (
-        np.array(
-            [
-                region.centroid
-                for region in regionprops(label_image)
-                if region.area > min_area and region.area < tile_size ** 2 * 0.8
-            ]
-        )
-        .round()
-        .astype(int)
-    )
-    ma = (
-        np.array(
-            [
-                region.minor_axis_length
-                for region in regionprops(label_image)
-                if region.area > min_area and region.area < tile_size ** 2 * 0.8
-            ]
-        )
-        .round()
-        .astype(int)
-    )
-    maskx = (tile_size // 2 < traps[:, 0]) & (
-        traps[:, 0] < image.shape[0] - tile_size // 2
-    )
-    masky = (tile_size // 2 < traps[:, 1]) & (
-        traps[:, 1] < image.shape[1] - tile_size // 2
-    )
-
-    traps = traps[maskx & masky, :]
-    ma = ma[maskx & masky]
-
-    chosen_trap_coords = np.round(traps[ma.argmin()]).astype(int)
-    x, y = chosen_trap_coords
-    template = image[
-        x - tile_size // 2 : x + tile_size // 2, y - tile_size // 2 : y + tile_size // 2
-    ]
-
-    traps = identify_trap_locations(image, template)
-
-    if len(traps) < 10 and downscale != 1:
-        print("Trying again.")
-        return segment_traps(image, tile_size, downscale=1)
-
-    return traps
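For reference, a minimal usage sketch of segment_traps (the file name below is hypothetical; pcore.traps is the module removed by this patch):

    import imageio
    from pcore.traps import segment_traps

    # Any single-channel brightfield frame will do; the path is illustrative.
    brightfield = imageio.imread("pos001_brightfield.tif").astype(float)
    trap_centres = segment_traps(brightfield, tile_size=117)
    print(f"detected {len(trap_centres)} candidate traps")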
-
-
-# def segment_traps(image, tile_size, downscale=0.4):
-#     # Make image go between 0 and 255
-#     img = image  # Keep a memory of image in case need to re-run
-#     image = stretch_image(image)
-#     # TODO Optimise the hyperparameters
-#     disk_radius = int(min([0.01 * x for x in img.shape]))
-#     min_area = 0.1 * (tile_size ** 2)
-#     if downscale != 1:
-#         img = transform.rescale(image, downscale)
-#     entropy_image = entropy(img, disk(disk_radius))
-#     if downscale != 1:
-#         entropy_image = transform.rescale(entropy_image, 1 / downscale)
-
-#     # apply threshold
-#     thresh = threshold_otsu(entropy_image)
-#     bw = closing(entropy_image > thresh, square(3))
-
-#     # remove artifacts connected to image border
-#     cleared = clear_border(bw)
-
-#     # label image regions
-#     label_image = label(cleared)
-#     traps = [
-#         region.centroid for region in regionprops(label_image) if region.area > min_area
-#     ]
-#     if len(traps) < 10 and downscale != 1:
-#         print("Trying again.")
-#         return segment_traps(image, tile_size, downscale=1)
-#     return traps
-
-
-def identify_trap_locations(
-    image, trap_template, optimize_scale=True, downscale=0.35, trap_size=None
-):
-    """
-    Identify the traps in a single image based on a trap template.
-    This assumes a trap template that is similar to the image in question
-    (same camera, same magnification; ideally same experiment).
-
-    This method speeds up the search by downscaling both the image and
-    the trap template before running the template match.
-    It also optimizes the scale and the rotation of the trap template.
-
-    :param image: the full image in which to search for traps
-    :param trap_template: a 2D template image of a single trap
-    :param optimize_scale: whether to also search over template scales
-    :param downscale: factor by which image and template are downscaled
-    :param trap_size: size of the trap tile; defaults to the template width
-    :return: an (N, 2) array of trap centre coordinates
-    """
-    trap_size = trap_size if trap_size is not None else trap_template.shape[0]
-    # Careful, the image is float16!
-    img = transform.rescale(image.astype(float), downscale)
-    temp = transform.rescale(trap_template, downscale)
-
-    # TODO random search hyperparameter optimization
-    # optimize rotation
-    matches = {
-        rotation: feature.match_template(
-            img,
-            transform.rotate(temp, rotation, cval=np.median(img)),
-            pad_input=True,
-            mode="median",
-        )
-        ** 2
-        for rotation in [0, 90, 180, 270]
-    }
-    best_rotation = max(matches, key=lambda x: np.percentile(matches[x], 99.9))
-    temp = transform.rotate(temp, best_rotation, cval=np.median(img))
-
-    if optimize_scale:
-        scales = np.linspace(0.5, 2, 10)
-        matches = {
-            scale: feature.match_template(
-                img, transform.rescale(temp, scale), mode="median", pad_input=True
-            )
-            ** 2
-            for scale in scales
-        }
-        best_scale = max(matches, key=lambda x: np.percentile(matches[x], 99.9))
-        matched = matches[best_scale]
-    else:
-        matched = feature.match_template(img, temp, pad_input=True, mode="median")
-
-    coordinates = feature.peak_local_max(
-        transform.rescale(matched, 1 / downscale),
-        min_distance=int(trap_template.shape[0] * 0.70),
-        exclude_border=(trap_size // 3),
-    )
-    return coordinates
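A hedged sketch of calling identify_trap_locations with a template cut out of the same image; the cut-out coordinates are assumptions chosen for illustration:

    import imageio
    from pcore.traps import identify_trap_locations

    image = imageio.imread("pos001_brightfield.tif").astype(float)
    tile = 117
    cx, cy = 500, 620  # assumed centre of one clearly visible trap
    template = image[cx - tile // 2 : cx + tile // 2, cy - tile // 2 : cy + tile // 2]
    coords = identify_trap_locations(image, template, optimize_scale=True, downscale=0.35)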
-
-
-def get_tile_shapes(x, tile_size, max_shape):
-    half_size = tile_size // 2
-    xmin = int(x[0] - half_size)
-    ymin = max(0, int(x[1] - half_size))
-    # if xmin + tile_size > max_shape[0]:
-    #     xmin = max_shape[0] - tile_size
-    # if ymin + tile_size > max_shape[1]:
-    # #     ymin = max_shape[1] - tile_size
-    # return max(xmin, 0), xmin + tile_size, max(ymin, 0), ymin + tile_size
-    return xmin, xmin + tile_size, ymin, ymin + tile_size
-
-
-def in_image(img, xmin, xmax, ymin, ymax, xidx=2, yidx=3):
-    # True only if the requested tile lies entirely within the image bounds
-    if xmin >= 0 and ymin >= 0:
-        return xmax < img.shape[xidx] and ymax < img.shape[yidx]
-    return False
-
-
-def get_xy_tile(img, xmin, xmax, ymin, ymax, xidx=2, yidx=3, pad_val=None):
-    if pad_val is None:
-        pad_val = np.median(img)
-    # Get the tile from the image
-    idx = [slice(None)] * len(img.shape)
-    idx[xidx] = slice(max(0, xmin), min(xmax, img.shape[xidx]))
-    idx[yidx] = slice(max(0, ymin), min(ymax, img.shape[yidx]))
-    tile = img[tuple(idx)]
-    # Check if the tile is in the image
-    if in_image(img, xmin, xmax, ymin, ymax, xidx, yidx):
-        return tile
-    else:
-        # Add padding
-        pad_shape = [(0, 0)] * len(img.shape)
-        pad_shape[xidx] = (max(-xmin, 0), max(xmax - img.shape[xidx], 0))
-        pad_shape[yidx] = (max(-ymin, 0), max(ymax - img.shape[yidx], 0))
-        tile = np.pad(tile, pad_shape, constant_values=pad_val)
-    return tile
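A small worked example of the padding behaviour above (array contents are arbitrary): a tile that overhangs the image border comes back padded to the requested size.

    import numpy as np
    from pcore.traps import get_xy_tile

    img = np.zeros((1, 1, 100, 100, 1))
    tile = get_xy_tile(img, xmin=-10, xmax=107, ymin=20, ymax=137)
    print(tile.shape)  # (1, 1, 117, 117, 1): 10 + 100 + 7 rows, 80 + 37 columns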
-
-
-def get_trap_timelapse(
-    raw_expt, trap_locations, trap_id, tile_size=117, channels=None, z=None
-):
-    """
-    Get a timelapse for a given trap by specifying the trap_id
-    :param raw_expt: A Timelapse object from which data is obtained
-    :param trap_locations: Per-timepoint list of trap centre coordinates
-    :param trap_id: An integer defining which trap to choose. Counted
-    between 0 and Tiler.n_traps - 1
-    :param tile_size: The size of the trap tile (centered around the
-    trap as much as possible, edge cases exist)
-    :param channels: Which channels to fetch, indexed from 0.
-    If None, defaults to [0]
-    :param z: Which z_stacks to fetch, indexed from 0.
-    If None, defaults to [0].
-    :return: A numpy array with the timelapse in (C,T,X,Y,Z) order
-    """
-    # Set the defaults (list is mutable)
-    channels = channels if channels is not None else [0]
-    z = z if z is not None else [0]
-    # Get trap location for that id:
-    trap_centers = [trap_locations[i][trap_id] for i in range(len(trap_locations))]
-
-    max_shape = (raw_expt.shape[2], raw_expt.shape[3])
-    tiles_shapes = [
-        get_tile_shapes((x[0], x[1]), tile_size, max_shape) for x in trap_centers
-    ]
-
-    timelapse = [
-        get_xy_tile(
-            raw_expt[channels, i, :, :, z], xmin, xmax, ymin, ymax, pad_val=None
-        )
-        for i, (xmin, xmax, ymin, ymax) in enumerate(tiles_shapes)
-    ]
-    return np.hstack(timelapse)
-
-
-def get_trap_timelapse_omero(
-    raw_expt, trap_locations, trap_id, tile_size=117, channels=None, z=None, t=None
-):
-    """
-    Get a timelapse for a given trap by specifying the trap_id
-    :param raw_expt: A Timelapse object from which data is obtained
-    :param trap_id: An integer defining which trap to choose. Counted
-    between 0 and Tiler.n_traps - 1
-    :param tile_size: The size of the trap tile (centered around the
-    trap as much as possible, edge cases exist)
-    :param channels: Which channels to fetch, indexed from 0.
-    If None, defaults to [0]
-    :param z: Which z_stacks to fetch, indexed from 0.
-    If None, defaults to [0].
-    :return: A numpy array with the timelapse in (C,T,X,Y,Z) order
-    """
-    # Set the defaults (list is mutable)
-    channels = channels if channels is not None else [0]
-    z_positions = z if z is not None else [0]
-    times = (
-        t if t is not None else np.arange(raw_expt.shape[1])
-    )  # TODO choose sub-set of time points
-    shape = (len(channels), len(times), tile_size, tile_size, len(z_positions))
-    # Get trap location for that id:
-    zct_tiles, slices, trap_ids = all_tiles(
-        trap_locations, shape, raw_expt, z_positions, channels, times, [trap_id]
-    )
-
-    # TODO Make this an explicit function in TimelapseOMERO
-    images = raw_expt.pixels.getTiles(zct_tiles)
-    timelapse = np.full(shape, np.nan)
-    total = len(zct_tiles)
-    for (z, c, t, _), (y, x), image in tqdm(
-        zip(zct_tiles, slices, images), total=total
-    ):
-        ch = channels.index(c)
-        tp = list(times).index(t)
-        z_pos = z_positions.index(z)
-        timelapse[ch, tp, x[0] : x[1], y[0] : y[1], z_pos] = image
-
-    # for x in timelapse:  # By channel
-    #    np.nan_to_num(x, nan=np.nanmedian(x), copy=False)
-    return timelapse
-
-
-def all_tiles(trap_locations, shape, raw_expt, z_positions, channels, times, traps):
-    _, _, x, y, _ = shape
-    _, _, MAX_X, MAX_Y, _ = raw_expt.shape
-
-    trap_ids = []
-    zct_tiles = []
-    slices = []
-    for z in z_positions:
-        for ch in channels:
-            for t in times:
-                for trap_id in traps:
-                    centre = trap_locations[t][trap_id]
-                    xmin, ymin, xmax, ymax, r_xmin, r_ymin, r_xmax, r_ymax = tile_where(
-                        centre, x, y, MAX_X, MAX_Y
-                    )
-                    slices.append(
-                        ((r_ymin - ymin, r_ymax - ymin), (r_xmin - xmin, r_xmax - xmin))
-                    )
-                    tile = (r_ymin, r_xmin, r_ymax - r_ymin, r_xmax - r_xmin)
-                    zct_tiles.append((z, ch, t, tile))
-                    trap_ids.append(trap_id)  # So we remember the order!
-    return zct_tiles, slices, trap_ids
-
-
-def tile_where(centre, x, y, MAX_X, MAX_Y):
-    # Find the position of the tile
-    xmin = int(centre[1] - x // 2)
-    ymin = int(centre[0] - y // 2)
-    xmax = xmin + x
-    ymax = ymin + y
-    # What do we actually have available?
-    r_xmin = max(0, xmin)
-    r_xmax = min(MAX_X, xmax)
-    r_ymin = max(0, ymin)
-    r_ymax = min(MAX_Y, ymax)
-    return xmin, ymin, xmax, ymax, r_xmin, r_ymin, r_xmax, r_ymax
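A worked example of the clamping above, assuming a 1200 x 1200 frame and a 96-pixel tile centred near the top-left corner:

    from pcore.traps import tile_where

    xmin, ymin, xmax, ymax, r_xmin, r_ymin, r_xmax, r_ymax = tile_where(
        centre=(5, 10), x=96, y=96, MAX_X=1200, MAX_Y=1200
    )
    print(xmin, ymin)      # -38 -43: the ideal tile overhangs the frame
    print(r_xmin, r_ymin)  # 0 0: the region actually available in the frame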
-
-
-def get_tile(shape, center, raw_expt, ch, t, z):
-    """Returns a tile from the raw experiment with a given shape.
-
-    :param shape: The shape of the tile in (C, T, X, Y, Z) order.
-    :param center: The x,y position of the centre of the tile
-    :param raw_expt: The experiment object, indexed as (C, T, X, Y, Z)
-    :param ch: Channel indices to fetch
-    :param t: Time point indices to fetch
-    :param z: Z-section indices to fetch
-    """
-    _, _, x, y, _ = shape
-    _, _, MAX_X, MAX_Y, _ = raw_expt.shape
-    tile = np.full(shape, np.nan)
-
-    # Find the position of the tile
-    xmin = int(center[1] - x // 2)
-    ymin = int(center[0] - y // 2)
-    xmax = xmin + x
-    ymax = ymin + y
-    # What do we actually have available?
-    r_xmin = max(0, xmin)
-    r_xmax = min(MAX_X, xmax)
-    r_ymin = max(0, ymin)
-    r_ymax = min(MAX_Y, ymax)
-
-    # Fill values
-    tile[
-        :, :, (r_xmin - xmin) : (r_xmax - xmin), (r_ymin - ymin) : (r_ymax - ymin), :
-    ] = raw_expt[ch, t, r_xmin:r_xmax, r_ymin:r_ymax, z]
-    # fill_val = np.nanmedian(tile)
-    # np.nan_to_num(tile, nan=fill_val, copy=False)
-    return tile
-
-
-def get_traps_timepoint(
-    raw_expt, trap_locations, tp, tile_size=96, channels=None, z=None
-):
-    """
-    Get all the traps from a given time point
-    :param raw_expt: A Timelapse object from which data is obtained
-    :param trap_locations: Per-timepoint list of trap centre coordinates
-    :param tp: Index of the time point to fetch
-    :param tile_size: Size of the (square) trap tile in pixels
-    :param channels: Channel indices to fetch; defaults to [0]
-    :param z: Z-section indices to fetch; defaults to [0]
-    :return: A numpy array with the traps in the (trap, C, T, X, Y,
-    Z) order
-    """
-
-    # Set the defaults (list is mutable)
-    channels = channels if channels is not None else [0]
-    z_positions = z if z is not None else [0]
-    if isinstance(z_positions, slice):
-        n_z = z_positions.stop
-        z_positions = list(range(n_z))  # slice is not iterable error
-    elif isinstance(z_positions, list):
-        n_z = len(z_positions)
-    else:
-        n_z = 1
-
-    n_traps = len(trap_locations[tp])
-    trap_ids = list(range(n_traps))
-    shape = (len(channels), 1, tile_size, tile_size, n_z)
-    # all tiles
-    zct_tiles, slices, trap_ids = all_tiles(
-        trap_locations, shape, raw_expt, z_positions, channels, [tp], trap_ids
-    )
-    # TODO Make this an explicit function in TimelapseOMERO
-    images = raw_expt.pixels.getTiles(zct_tiles)
-    # Initialise empty traps
-    traps = np.full((n_traps,) + shape, np.nan)
-    for trap_id, (z, c, _, _), (y, x), image in zip(
-        trap_ids, zct_tiles, slices, images
-    ):
-        ch = channels.index(c)
-        z_pos = z_positions.index(z)
-        traps[trap_id, ch, 0, x[0] : x[1], y[0] : y[1], z_pos] = image
-    for x in traps:  # By trap
-        np.nan_to_num(x, nan=np.nanmedian(x), copy=False)
-    return traps
-
-
-def centre(img, percentage=0.3):
-    y, x = img.shape
-    cropx = int(np.ceil(x * percentage))
-    cropy = int(np.ceil(y * percentage))
-    startx = int(x // 2 - (cropx // 2))
-    starty = int(y // 2 - (cropy // 2))
-    return img[starty : starty + cropy, startx : startx + cropx]
-
-
-def align_timelapse_images(
-    raw_data, channel=0, reference_reset_time=80, reference_reset_drift=25
-):
-    """
-    Uses image registration to align images in the timelapse.
-    Uses the channel with id `channel` to perform the registration.
-
-    Starts with the first timepoint as a reference and changes the
-    reference to the current timepoint if either the images have moved
-    by half of a trap width or `reference_reset_time` has been reached.
-
-    Returns a 2D numpy array of shape (t, 2) with the (drift_x, drift_y)
-    estimate for each time point.
-    We assume no drift occurs in the z-direction.
-
-    :param reference_reset_drift: Upper bound on the allowed drift before
-    resetting the reference image.
-    :param reference_reset_time: Upper bound on number of time points to
-    register before resetting the reference image.
-    :param channel: index of the channel to use for image registration.
-    """
-    ref = centre(np.squeeze(raw_data[channel, 0, :, :, 0]))
-    size_t = raw_data.shape[1]
-
-    drift = [np.array([0, 0])]
-    for i in range(1, size_t):
-        img = centre(np.squeeze(raw_data[channel, i, :, :, 0]))
-
-        shifts, _, _ = feature.register_translation(ref, img)
-        # If a huge move is detected at a single time point it is taken
-        # to be inaccurate and the correction from the previous time point
-        # is used.
-        # This might be common if there is a focus loss for example.
-        if any([abs(x - y) > reference_reset_drift for x, y in zip(shifts, drift[-1])]):
-            shifts = drift[-1]
-
-        drift.append(shifts)
-        ref = img
-
-        # TODO test necessity for references, description below
-        #   If the images have drifted too far from the reference or too
-        #   much time has passed we change the reference and keep track of
-        #   which images are kept as references
-    return np.stack(drift)
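A minimal sketch of the same frame-to-frame registration using skimage.registration.phase_cross_correlation (the successor to feature.register_translation); it omits the large-jump rejection and reference handling described in the docstring above.

    import numpy as np
    from skimage.registration import phase_cross_correlation

    def simple_drift(raw_data, channel=0):
        # Frame-to-frame drift for a (C, T, X, Y, Z) stack.
        ref = np.squeeze(raw_data[channel, 0, :, :, 0])
        drift = [np.zeros(2)]
        for i in range(1, raw_data.shape[1]):
            img = np.squeeze(raw_data[channel, i, :, :, 0])
            shift, _, _ = phase_cross_correlation(ref, img)
            drift.append(shift)
            ref = img
        return np.stack(drift)

    print(simple_drift(np.random.rand(1, 3, 64, 64, 1)).shape)  # (3, 2)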
diff --git a/pcore/utils.py b/pcore/utils.py
deleted file mode 100644
index 613bdb72..00000000
--- a/pcore/utils.py
+++ /dev/null
@@ -1,135 +0,0 @@
-"""
-Utility functions and classes
-"""
-import itertools
-import logging
-import operator
-from pathlib import Path
-from typing import Callable
-
-import h5py
-import imageio
-import cv2
-import numpy as np
-
-def repr_obj(obj, indent=0):
-    """
-    Helper function to display info about OMERO objects.
-    Not all objects will have a "name" or owner field.
-    """
-    string = """%s%s:%s  Name:"%s" (owner=%s)""" % (
-        " " * indent,
-        obj.OMERO_CLASS,
-        obj.getId(),
-        obj.getName(),
-        obj.getAnnotation())
-
-    return string
-
-def imread(path):
-    return cv2.imread(str(path), -1)
-
-
-class ImageCache:
-    """HDF5-based image cache for faster loading of the images once they've
-    been read.
-    """
-    def __init__(self, file, name, shape, remote_fn):
-        self.store = h5py.File(file, 'a')
-        # Create a dataset
-        self.dataset = self.store.create_dataset(name, shape,
-                                                 dtype=float,
-                                                 fillvalue=np.nan)
-        self.remote_fn = remote_fn
-
-    def __getitem__(self, item):
-        cached = self.dataset[item]
-        if np.any(np.isnan(cached)):
-            full = self.remote_fn(item)
-            self.dataset[item] = full
-            return full
-        else:
-            return cached
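A hedged usage sketch for ImageCache; fetch_tile is a hypothetical stand-in for a real remote read (e.g. from OMERO), and the file and dataset names are illustrative.

    import numpy as np
    from pcore.utils import ImageCache

    def fetch_tile(item):
        # Stand-in for a remote fetch; returns one 64x64 frame.
        return np.random.rand(64, 64)

    cache = ImageCache("images.h5", "brightfield", shape=(10, 64, 64),
                       remote_fn=fetch_tile)
    first = cache[0]   # fetched via remote_fn and written to the HDF5 store
    again = cache[0]   # now served directly from the store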
-
-
-class Cache:
-    """
-    Fixed-length mapping to use as a cache.
-    Deletes items in FIFO manner when maximum allowed length is reached.
-    """
-    def __init__(self, max_len=5000, load_fn: Callable = imread):
-        """
-        :param max_len: Maximum number of items in the cache.
-        :param load_fn: The function used to load new items if they are not
-        available in the Cache
-        """
-        self._dict = dict()
-        self._queue = []
-        self.load_fn = load_fn
-        self.max_len = max_len
-
-    def __getitem__(self, item):
-        if item not in self._dict:
-            self.load_item(item)
-        return self._dict[item]
-
-    def load_item(self, item):
-        self._dict[item] = self.load_fn(item)
-        # Clean up the queue
-        self._queue.append(item)
-        if len(self._queue) > self.max_len:
-            del self._dict[self._queue.pop(0)]
-
-    def clear(self):
-        self._dict.clear()
-        self._queue.clear()
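A quick usage sketch for Cache with a trivial load function (the default load_fn reads image files with cv2):

    from pcore.utils import Cache

    squares = Cache(max_len=3, load_fn=lambda k: k * k)
    print(squares[2], squares[4])  # 4 16, computed on first access via load_fn
    print(squares[2])              # 4, served from the cache this time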
-
-
-def accumulate(l: list):
-    """Group a list of (key, value) pairs by key, yielding (key, [values])."""
-    l = sorted(l)
-    it = itertools.groupby(l, operator.itemgetter(0))
-    for key, sub_iter in it:
-        yield key, [x[1] for x in sub_iter]
-
-
-def get_store_path(save_dir, store, name):
-    """Create a path to a position-specific store.
-
-    This combines the name and the store's base name into a file path within save_dir.
-    For example:
-    >>> get_store_path('data', 'baby_seg.h5', 'pos001')
-    Path('data/pos001baby_seg.h5')
-
-    :param save_dir: The root directory in which to save the file, absolute
-    path.
-    :param store: The base name of the store
-    :param name: The name of the position
-    :return: Path(save_dir) / name+store
-    """
-    store = Path(save_dir) / store
-    store = store.with_name(name + store.name)
-    return store
-
-def parametrized(dec):
-    """Allow a decorator to accept its own arguments."""
-    def layer(*args, **kwargs):
-        def repl(f):
-            return dec(f, *args, **kwargs)
-        return repl
-    return layer
-
-from functools import wraps
-from time import perf_counter
-@parametrized
-def timed(f, name=None):
-    @wraps(f)
-    def decorated(*args, **kwargs):
-        t = perf_counter()
-        res = f(*args, **kwargs)
-        to_print = name or f.__name__
-        logging.debug(f'Timing:{to_print}:{perf_counter() - t}s')
-        return res
-    return decorated
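A usage sketch for the parametrized @timed decorator; enable DEBUG logging to see the timing messages.

    import logging
    from pcore.utils import timed

    logging.basicConfig(level=logging.DEBUG)

    @timed("tile-extraction")
    def slow_sum(n):
        return sum(range(n))

    slow_sum(10**6)  # logs e.g. Timing:tile-extraction:0.03...s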
-
-
-
diff --git a/setup.py b/setup.py
index 64bb81ee..aab0a27d 100644
--- a/setup.py
+++ b/setup.py
@@ -2,8 +2,8 @@ from setuptools import setup, find_packages
 
 print("find_packages outputs ", find_packages("aliby"))
 setup(
-    name="pipeline-core",
-    version="0.1.1-dev",
+    name="aliby",
+    version="0.1.2",
     packages=find_packages(),
     # package_dir={"": "aliby"},
     # packages=['aliby', 'aliby.io'],
-- 
GitLab