diff --git a/aliby/pipeline.py b/aliby/pipeline.py
index fa94f7bea6c0227c95b464a365a6feeabdeaca46..c77e56bafb587e6db9b14302e01eaecf088b8f3f 100644
--- a/aliby/pipeline.py
+++ b/aliby/pipeline.py
@@ -24,7 +24,7 @@ from agora.io.writer import (  # BabyWriter,
     TilerWriter,
 )
 from pathos.multiprocessing import Pool
-from postprocessor.core.processor import PostProcessor, PostProcessorParameters
+# from postprocessor.core.processor import PostProcessor, PostProcessorParameters
 
 # import pandas as pd
 from scipy import ndimage
@@ -141,11 +141,14 @@ class PipelineParameters(ParametersABC):
             exparams_from_meta(meta_d)
             or BabyParameters.default(**extraction).to_dict()
         )
-        defaults["postprocessing"] = PostProcessorParameters.default(
-            **postprocessing
-        ).to_dict()
+        defaults["postprocessing"] = {}
         defaults["reporting"] = {}
 
+        # defaults["postprocessing"] = PostProcessorParameters.default(
+        #     **postprocessing
+        # ).to_dict()
+        # defaults["reporting"] = {}
+
         return cls(**{k: v for k, v in defaults.items()})
 
     def load_logs(self):
@@ -485,6 +488,7 @@ class Pipeline(ProcessABC):
                             frac_clogged_traps = self.check_earlystop(
                                 filename, earlystop, steps["tiler"].tile_size
                             )
+                            print(f"Runs to frame {i}")
                             logging.debug(
                                 f"Quality:Clogged_traps:{frac_clogged_traps}"
                             )
@@ -503,12 +507,12 @@ class Pipeline(ProcessABC):
 
                         meta.add_fields({"last_processed": i})
                     # Run post processing
-
+                    # debug leftover: uncomment `1/0` to force a crash here when testing
                     meta.add_fields({"end_status": "Success"})
-                    post_proc_params = PostProcessorParameters.from_dict(
-                        config["postprocessing"]
-                    )
-                    PostProcessor(filename, post_proc_params).run()
+                    # post_proc_params = PostProcessorParameters.from_dict(
+                    #     config["postprocessing"]
+                    # )
+                    # PostProcessor(filename, post_proc_params).run()
 
                     return 1
 
diff --git a/extraction/core/extractor.py b/extraction/core/extractor.py
index c2406a16c8a188526fa3f3c5e840ed2842f33591..ee7e58b61ecba102672844bf4bd377ad6c0a00d8 100644
--- a/extraction/core/extractor.py
+++ b/extraction/core/extractor.py
@@ -89,7 +89,7 @@ class Extractor(ProcessABC):
 
     Its methods therefore require both tile images and masks.
 
-    Usually one metric is applied per mask, but there are tile-specific backgrounds (Alan), which apply one metric per tile.
+    Usually one metric is applied to the masked area in a tile, but there are metrics that depend on the whole tile.
 
     Extraction follows a three-level tree structure. Channels, such as GFP, are the root level; the second level is the reduction algorithm, such as maximum projection; the last level is the metric - the specific operation to apply to the cells in the image identified by the mask, such as median, which is the median value of the pixels in each cell.
 
@@ -166,9 +166,8 @@ class Extractor(ProcessABC):
         return self._channels
 
     @property
-    # Alan: does this work. local is not a string.
     def current_position(self):
-        return self.local.split("/")[-1][:-3]
+        return str(self.local).split("/")[-1][:-3]
 
     @property
     def group(self):
@@ -295,7 +294,6 @@ class Extractor(ProcessABC):
             A two-tuple of a tuple of results and a tuple with the corresponding trap_id and cell labels
         """
         if labels is None:
-            # Alan: it looks like this will crash if Labels is None
             raise Warning("No labels given. Sorting cells using index.")
         cell_fun = True if metric in self._all_cell_funs else False
         idx = []
@@ -357,7 +355,7 @@ class Extractor(ProcessABC):
             dict for which keys are reduction functions and values are either a list or a set of strings giving the metric functions.
             For example: {'np_max': {'max5px', 'mean', 'median'}}
         **kwargs: dict
-            All other arguments and must include masks and traps. Alan: stll true?
+            All other arguments passed to Extractor.extract_funs.
 
         Returns
         ------
@@ -509,14 +507,13 @@ class Extractor(ProcessABC):
                 ch_bs = ch + "_bgsub"
                 self.img_bgsub[ch_bs] = []
                 for trap, bg in zip(img, bgs):
-                    cells_fl = np.zeros_like(trap)
-                    # Alan: should this not be is_not_cell?
-                    is_cell = np.where(bg)
+                    bg_fluo = np.zeros_like(trap)
+                    not_cell = np.where(bg)
                     # skip for empty traps
-                    if len(is_cell[0]):
-                        cells_fl = np.median(trap[is_cell], axis=0)
+                    if len(not_cell[0]):
+                        bg_fluo = np.median(trap[not_cell], axis=0)
                     # subtract median background
-                    self.img_bgsub[ch_bs].append(trap - cells_fl)
+                    self.img_bgsub[ch_bs].append(trap - bg_fluo)
                 # apply metrics to background-corrected data
                 d[ch_bs] = self.reduce_extract(
                     red_metrics=ch_tree[ch],
@@ -636,45 +633,6 @@ class Extractor(ProcessABC):
             self.save_to_hdf(d)
         return d
 
-    # Alan: isn't this identical to run?
-    # def extract_pos(
-    #     self, tree=None, tps: List[int] = None, save=True, **kwargs
-    # ) -> dict:
-
-    #     if tree is None:
-    #         tree = self.params.tree
-
-    #     if tps is None:
-    #         tps = list(range(self.meta["time_settings/ntimepoints"]))
-
-    #     d = {}
-    #     for tp in tps:
-    #         new = flatten_nest(
-    #             self.extract_tp(tp=tp, tree=tree, **kwargs),
-    #             to="series",
-    #             tp=tp,
-    #         )
-
-    #         for k in new.keys():
-    #             n = new[k]
-    #             d[k] = pd.concat((d.get(k, None), n), axis=1)
-
-    #     for k in d.keys():
-    #         indices = ["experiment", "position", "trap", "cell_label"]
-    #         idx = (
-    #             indices[-d[k].index.nlevels :]
-    #             if d[k].index.nlevels > 1
-    #             else [indices[-2]]
-    #         )
-    #         d[k].index.names = idx
-
-    #         toreturn = d
-
-    #     if save:
-    #         self.save_to_hdf(toreturn)
-
-    #     return toreturn
-
     def save_to_hdf(self, dict_series, path=None):
         """
         Save the extracted data to the h5 file.
@@ -734,20 +692,6 @@ def flatten_nesteddict(nest: dict, to="series", tp: int = None) -> dict:
     return d
 
 
-# Alan: this no longer seems to be used
-def fill_tree(tree):
-    if tree is None:
-        return None
-    tree_depth = depth(tree)
-    if depth(tree) < 3:
-        d = {None: {None: {None: []}}}
-        for _ in range(2 - tree_depth):
-            d = d[None]
-        d[None] = tree
-        tree = d
-    return tree
-
-
 class hollowExtractor(Extractor):
     """
     Extractor that only cares about receiving images and masks.
diff --git a/extraction/core/functions/cell.py b/extraction/core/functions/cell.py
index 4ecf94b0b239333b2ae47dadd34a6718e49cc18a..8c4ea97ef4343e40dc985dcca91439208ab420ad 100644
--- a/extraction/core/functions/cell.py
+++ b/extraction/core/functions/cell.py
@@ -269,10 +269,17 @@ def min_maj_approximation(cell_mask):
     cell_mask: 3d array
         Segmentation masks for cells
     """
+    # pad outside with zeros so that the distance transforms have no edge artifacts
     padded = np.pad(cell_mask, 1, mode="constant", constant_values=0)
+    # get the distance from the edge, masked
     nn = ndimage.morphology.distance_transform_edt(padded == 1) * padded
+    # get the distance from the top of the cone, masked
     dn = ndimage.morphology.distance_transform_edt(nn - nn.max()) * padded
+    # get the size of the top of the cone (points that are equally maximal)
     cone_top = ndimage.morphology.distance_transform_edt(dn == 0) * padded
+    # minor axis = largest distance from the edge of the mask (the in-radius)
     min_ax = np.round(nn.max())
+    # major axis = largest distance from the cone top
+    # + distance from the center of cone top to edge of cone top
     maj_ax = np.round(dn.max() + cone_top.sum() / 2)
     return min_ax, maj_ax