
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing 1097 additions and 392 deletions
@@ -14,5 +14,4 @@ params = Parameters(
 ext = Extractor(params, omero_id=19310)
-# ext.extract_exp(tile_size=117)
 d = ext.extract_tp(tp=1, tile_size=117)
@@ -27,7 +27,7 @@ plt.show()
 tiler = Tiler(expt, template=trap_template)
 # Load images (takes about 5 mins)
-trap_tps = tiler.get_traps_timepoint(0, tile_size=117, z=[2])
+trap_tps = tiler.get_tiles_timepoint(0, tile_size=117, z=[2])
 # Plot found traps
 nrows, ncols = (5, 5)
=====================
06-Jan-2020 18:30:59 Start creating new experiment using parameters:
Omero experiment name: 001
Temporary working directory: C:
06-Jan-2020 18:30:59 Processing position 2 (1108_002)
06-Jan-2020 18:31:00 Processing position 3 (1108_003)
06-Jan-2020 18:31:01 Processing position 4 (1109_004)
06-Jan-2020 18:31:02 Processing position 5 (1109_005)
06-Jan-2020 18:31:04 Processing position 6 (1109_006)
06-Jan-2020 18:31:05 Processing position 7 (1110_007)
06-Jan-2020 18:31:06 Processing position 8 (1110_008)
06-Jan-2020 18:31:07 Processing position 9 (1110_009)
06-Jan-2020 18:31:10 Successfully completed creating new experiment in 11 secs.
---------------------
=====================
06-Jan-2020 18:31:33 Start selecting traps...
06-Jan-2020 18:31:33 Processing position 1 (1108_001)
06-Jan-2020 18:31:40 Remove trap at 550 1188
06-Jan-2020 18:31:40 Remove trap at 733 1179
06-Jan-2020 18:31:41 Remove trap at 384 1189
06-Jan-2020 18:31:42 Remove trap at 201 1186
06-Jan-2020 18:31:47 Processing position 2 (1108_002)
06-Jan-2020 18:31:52 Remove trap at 384 1060
06-Jan-2020 18:31:54 Remove trap at 1081 571
06-Jan-2020 18:32:01 Processing position 3 (1108_003)
06-Jan-2020 18:32:05 Remove trap at 948 1140
06-Jan-2020 18:32:06 Remove trap at 1141 1174
06-Jan-2020 18:32:17 Remove trap at 139 1111
06-Jan-2020 18:32:18 Add trap at 130 1138
06-Jan-2020 18:32:26 Processing position 4 (1109_004)
06-Jan-2020 18:32:32 Remove trap at 1176 388
06-Jan-2020 18:32:39 Processing position 5 (1109_005)
06-Jan-2020 18:32:44 Remove trap at 1141 1135
06-Jan-2020 18:32:51 Remove trap at 955 379
06-Jan-2020 18:32:55 Processing position 6 (1109_006)
06-Jan-2020 18:33:00 Remove trap at 676 1177
06-Jan-2020 18:33:01 Remove trap at 1111 1147
06-Jan-2020 18:33:14 Processing position 7 (1110_007)
06-Jan-2020 18:33:20 Remove trap at 46 46
06-Jan-2020 18:33:28 Remove trap at 1150 84
06-Jan-2020 18:33:34 Processing position 8 (1110_008)
06-Jan-2020 18:33:49 Processing position 9 (1110_009)
06-Jan-2020 18:33:55 Add trap at 1153 1129
06-Jan-2020 18:33:57 Remove trap at 1135 1141
06-Jan-2020 18:33:57 Remove trap at 1176 1095
06-Jan-2020 18:34:15 Successfully completed selecting traps in 2.7 mins.
---------------------
=====================
06-Jan-2020 18:34:28 Start setting extraction parameters using parameters:
extractionParameters: {
extractFunction: extractCellDataStandardParfor
functionParameters: {
type: max
channels: 2 3
nuclearMarkerChannel: NaN
maxPixOverlap: 5
maxAllowedOverlap: 25
}
}
06-Jan-2020 18:34:28 Processing position 1 (1108_001)
06-Jan-2020 18:34:28 Processing position 2 (1108_002)
06-Jan-2020 18:34:29 Processing position 3 (1108_003)
06-Jan-2020 18:34:29 Processing position 4 (1109_004)
06-Jan-2020 18:34:30 Processing position 5 (1109_005)
06-Jan-2020 18:34:30 Processing position 6 (1109_006)
06-Jan-2020 18:34:30 Processing position 7 (1110_007)
06-Jan-2020 18:34:31 Processing position 8 (1110_008)
06-Jan-2020 18:34:31 Processing position 9 (1110_009)
06-Jan-2020 18:34:33 Successfully completed setting extraction parameters in 5 secs.
---------------------
=====================
07-Jan-2020 13:17:43 Start tracking traps in time...
07-Jan-2020 13:17:43 Processing position 1 (1108_001)
07-Jan-2020 13:23:31 Processing position 2 (1108_002)
07-Jan-2020 13:29:21 Processing position 3 (1108_003)
07-Jan-2020 13:35:13 Processing position 4 (1109_004)
07-Jan-2020 13:41:19 Processing position 5 (1109_005)
07-Jan-2020 13:47:09 Processing position 6 (1109_006)
07-Jan-2020 13:52:57 Processing position 7 (1110_007)
07-Jan-2020 13:58:41 Processing position 8 (1110_008)
07-Jan-2020 14:04:41 Processing position 9 (1110_009)
07-Jan-2020 14:10:38 Successfully completed tracking traps in time in 52.9 mins.
---------------------
=====================
07-Jan-2020 14:10:38 Start baby segmentation...
07-Jan-2020 14:10:39 Processing position 1 (1108_001)
07-Jan-2020 14:14:32 cTimelapse: 210.344 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 14:18:30 cTimelapse: 240.345 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 14:22:31 cTimelapse: 272.459 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 14:26:32 cTimelapse: 303.876 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 14:30:34 cTimelapse: 336.470 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 14:32:32 Processing position 2 (1108_002)
07-Jan-2020 14:36:22 cTimelapse: 206.699 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 14:40:12 cTimelapse: 235.726 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 14:44:13 cTimelapse: 268.814 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 14:48:27 cTimelapse: 306.046 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 14:52:43 cTimelapse: 343.681 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 14:54:44 Processing position 3 (1108_003)
07-Jan-2020 14:58:47 cTimelapse: 214.895 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 15:02:44 cTimelapse: 247.137 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 15:06:47 cTimelapse: 280.902 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 15:10:51 cTimelapse: 314.796 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 15:15:13 cTimelapse: 354.774 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 15:17:16 Processing position 4 (1109_004)
07-Jan-2020 15:21:06 cTimelapse: 222.663 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 15:25:09 cTimelapse: 253.596 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 15:29:16 cTimelapse: 286.597 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 15:33:46 cTimelapse: 325.040 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 15:38:35 cTimelapse: 369.190 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 15:40:50 Processing position 5 (1109_005)
07-Jan-2020 15:45:01 cTimelapse: 235.110 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 15:49:23 cTimelapse: 268.760 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 15:53:50 cTimelapse: 304.703 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 15:58:15 cTimelapse: 339.861 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 16:02:47 cTimelapse: 377.877 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 16:04:53 Processing position 6 (1109_006)
07-Jan-2020 16:08:32 cTimelapse: 205.246 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 16:12:09 cTimelapse: 231.500 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 16:15:49 cTimelapse: 259.276 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 16:19:45 cTimelapse: 291.813 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 16:24:03 cTimelapse: 331.193 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 16:26:11 Processing position 7 (1110_007)
07-Jan-2020 16:29:30 cTimelapse: 222.990 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 16:32:46 cTimelapse: 238.288 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 16:36:03 cTimelapse: 255.524 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 16:39:21 cTimelapse: 275.165 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 16:42:40 cTimelapse: 297.244 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 16:44:14 Processing position 8 (1110_008)
07-Jan-2020 16:47:32 cTimelapse: 215.583 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 16:50:51 cTimelapse: 235.959 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 16:54:09 cTimelapse: 256.409 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 16:57:25 cTimelapse: 275.563 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 17:00:43 cTimelapse: 296.390 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 17:02:13 Processing position 9 (1110_009)
07-Jan-2020 17:05:35 cTimelapse: 225.847 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 17:08:54 cTimelapse: 245.291 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 17:12:17 cTimelapse: 266.060 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 17:15:41 cTimelapse: 288.448 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 17:19:04 cTimelapse: 311.290 MB; posOverviewGUI: 741.580 MB
07-Jan-2020 17:20:36 Successfully completed baby segmentation in 3.2 hours.
---------------------
=====================
07-Jan-2020 17:20:37 Start tracking cells using parameters:
Tracking threshold: 10
07-Jan-2020 17:20:39 Processing position 1 (1108_001)
07-Jan-2020 17:21:23 Processing position 2 (1108_002)
07-Jan-2020 17:22:06 Processing position 3 (1108_003)
07-Jan-2020 17:22:49 Processing position 4 (1109_004)
07-Jan-2020 17:23:33 Processing position 5 (1109_005)
07-Jan-2020 17:24:18 Processing position 6 (1109_006)
07-Jan-2020 17:24:58 Processing position 7 (1110_007)
07-Jan-2020 17:25:25 Processing position 8 (1110_008)
07-Jan-2020 17:25:55 Processing position 9 (1110_009)
07-Jan-2020 17:26:25 Successfully completed tracking cells in 5.8 mins.
---------------------
=====================
07-Jan-2020 17:26:25 Start autoselecting cells using parameters:
Fraction of timelapse that cells are present for: 0.5
Number of frames a cell must be present: 540
Cell must appear by frame: 540
Cell must still be present by frame: 1
Maximum number of cells: Inf
07-Jan-2020 17:26:27 Processing position 1 (1108_001)
07-Jan-2020 17:26:42 Processing position 2 (1108_002)
07-Jan-2020 17:26:58 Processing position 3 (1108_003)
07-Jan-2020 17:27:14 Processing position 4 (1109_004)
07-Jan-2020 17:27:31 Processing position 5 (1109_005)
07-Jan-2020 17:27:48 Processing position 6 (1109_006)
07-Jan-2020 17:28:03 Processing position 7 (1110_007)
07-Jan-2020 17:28:13 Processing position 8 (1110_008)
07-Jan-2020 17:28:25 Processing position 9 (1110_009)
07-Jan-2020 17:28:36 Successfully completed autoselecting cells in 2.2 mins.
---------------------
=====================
07-Jan-2020 17:28:37 Start extracting cell information...
07-Jan-2020 17:28:39 Processing position 1 (1108_001)
07-Jan-2020 17:58:38 Processing position 2 (1108_002)
07-Jan-2020 18:28:43 Processing position 3 (1108_003)
07-Jan-2020 18:58:45 Processing position 4 (1109_004)
07-Jan-2020 19:29:03 Processing position 5 (1109_005)
07-Jan-2020 19:59:31 Processing position 6 (1109_006)
07-Jan-2020 20:29:01 Processing position 7 (1110_007)
07-Jan-2020 20:56:05 Processing position 8 (1110_008)
07-Jan-2020 21:23:53 Processing position 9 (1110_009)
07-Jan-2020 21:51:15 Successfully completed extracting cell information in 4.4 hours.
---------------------
=====================
07-Jan-2020 21:51:16 Start baby lineage extraction...
07-Jan-2020 21:51:18 Processing position 1 (1108_001)
07-Jan-2020 21:52:37 Processing position 2 (1108_002)
07-Jan-2020 21:53:57 Processing position 3 (1108_003)
07-Jan-2020 21:55:16 Processing position 4 (1109_004)
07-Jan-2020 21:56:36 Processing position 5 (1109_005)
07-Jan-2020 21:57:59 Processing position 6 (1109_006)
07-Jan-2020 21:59:08 Processing position 7 (1110_007)
07-Jan-2020 21:59:50 Processing position 8 (1110_008)
07-Jan-2020 22:00:41 Processing position 9 (1110_009)
07-Jan-2020 22:01:26 Successfully completed baby lineage extraction in 10.2 mins.
---------------------
=====================
07-Jan-2020 22:01:26 Start compiling cell information...
07-Jan-2020 22:01:28 Processing position 1 (1108_001)
07-Jan-2020 22:01:30 Processing position 2 (1108_002)
07-Jan-2020 22:01:33 Processing position 3 (1108_003)
07-Jan-2020 22:01:35 Processing position 4 (1109_004)
07-Jan-2020 22:01:38 Processing position 5 (1109_005)
07-Jan-2020 22:01:40 Processing position 6 (1109_006)
07-Jan-2020 22:01:42 Processing position 7 (1110_007)
07-Jan-2020 22:01:44 Processing position 8 (1110_008)
07-Jan-2020 22:01:46 Processing position 9 (1110_009)
07-Jan-2020 22:02:20 Successfully completed compiling cell information in 54 secs.
---------------------
Channels:
Channel name, Exposure time, Skip, Z sect., Start time, Camera mode, EM gain, Voltage
Brightfield, 30, 1, 1, 1, 2, 270, 1.000
GFPFast, 30, 1, 1, 1, 2, 270, 3.500
mCherry, 100, 1, 1, 1, 2, 270, 2.500
Z_sectioning:
Sections,Spacing,PFSon?,AnyZ?,Drift,Method
3, 0.80, 1, 1, 0, 2
Time_settings:
1,120,660,79200
Points:
Position name, X position, Y position, Z position, PFS offset, Group, Brightfield, GFPFast, mCherry
pos001, 568.00, 1302.00, 1876.500, 122.450, 1, 30, 30, 100
pos002, 1267.00, 1302.00, 1880.125, 119.950, 1, 30, 30, 100
pos003, 1026.00, 977.00, 1877.575, 120.100, 1, 30, 30, 100
pos004, 540.00, -347.00, 1868.725, 121.200, 2, 30, 30, 100
pos005, 510.00, -687.00, 1867.150, 122.900, 2, 30, 30, 100
pos006, -187.00, -470.00, 1864.050, 119.600, 2, 30, 30, 100
pos007, -731.00, 916.00, 1867.050, 117.050, 3, 30, 30, 100
pos008, -1003.00, 1178.00, 1866.425, 121.700, 3, 30, 30, 100
pos009, -568.00, 1157.00, 1868.450, 119.350, 3, 30, 30, 100
Flow_control:
Syringe pump details: 2 pumps.
Pump states at beginning of experiment:
Pump port, Diameter, Current rate, Direction, Running, Contents
COM7, 14.43, 0.00, INF, 1, 2% glucose in SC
COM8, 14.43, 4.00, INF, 1, 0.1% glucose in SC
Dynamic flow details:
Number of pump changes:
1
Switching parameters:
Infuse/withdraw volumes:
50
Infuse/withdraw rates:
100
Times:
0
Switched to:
2
Switched from:
1
Flow post switch:
0
4
2022-10-10 15:31:27,350 - INFO
Swain Lab microscope experiment log file
GIT commit: e5d5e33 fix: changes to a few issues with focus control on Batman.
Microscope name: Batman
Date: 2022-10-10 15:31:27
Log file path: D:\AcquisitionDataBatman\Swain Lab\Ivan\RAW DATA\2022\Oct\10-Oct-2022\pH_med_to_low00\pH_med_to_low.log
Micromanager config file: C:\Users\Public\Microscope control\Micromanager config files\Batman_python_15_4_22.cfg
Omero project: Default project
Omero tags:
Experiment details: Effect on growth and cytoplasmic pH of switch from normal pH (4.25) media to higher pH (5.69). Switching is run using the Oxygen software
-----Acquisition settings-----
2022-10-10 15:31:27,350 - INFO Image Configs:
Image config,Channel,Description,Exposure (ms), Number of Z sections,Z spacing (um),Sectioning method
brightfield1,Brightfield,Default bright field config,30,5,0.6,PIFOC
pHluorin405_0_4,pHluorin405,Phluorin excitation from 405 LED 0.4v and 10ms exposure,5,1,0.6,PIFOC
pHluorin488_0_4,GFPFast,Phluorin excitation from 488 LED 0.4v,10,1,0.6,PIFOC
cy5,cy5,Default cy5,30,1,0.6,PIFOC
Device properties:
Image config,device,property,value
pHluorin405_0_4,DTOL-DAC-1,Volts,0.4
pHluorin488_0_4,DTOL-DAC-2,Volts,0.4
cy5,DTOL-DAC-3,Volts,4
2022-10-10 15:31:27,353 - INFO
group: YST_247 field: position
Name, X, Y, Z, Autofocus offset
YST_247_001,-8968,-3319,2731.125040696934,123.25
YST_247_002,-8953,-3091,2731.3000406995416,123.25
YST_247_003,-8954,-2849,2731.600040704012,122.8
YST_247_004,-8941,-2611,2730.7750406917185,122.8
YST_247_005,-8697,-2541,2731.4500407017767,118.6
group: YST_247 field: time
start: 0
interval: 300
frames: 180
group: YST_247 field: config
brightfield1: 0xfffffffffffffffffffffffffffffffffffffffffffff
pHluorin405_0_4: 0xfffffffffffffffffffffffffffffffffffffffffffff
pHluorin488_0_4: 0xfffffffffffffffffffffffffffffffffffffffffffff
cy5: 0xfffffffffffffffffffffffffffffffffffffffffffff
2022-10-10 15:31:27,356 - INFO
group: YST_1510 field: position
Name,X,Y,Z,Autofocus offset
YST_1510_001,-6450,-230,2343.300034917891,112.55
YST_1510_002,-6450,-436,2343.350034918636,112.55
YST_1510_003,-6450,-639,2344.000034928322,116.8
YST_1510_004,-6450,-831,2344.250034932047,116.8
YST_1510_005,-6848,-536,2343.3250349182636,110
group: YST_1510 field: time
start: 0
interval: 300
frames: 180
group: YST_1510 field: config
brightfield1: 0xfffffffffffffffffffffffffffffffffffffffffffff
pHluorin405_0_4: 0xfffffffffffffffffffffffffffffffffffffffffffff
pHluorin488_0_4: 0xfffffffffffffffffffffffffffffffffffffffffffff
cy5: 0xfffffffffffffffffffffffffffffffffffffffffffff
2022-10-10 15:31:27,359 - INFO
group: YST_1511 field: position
Name, X, Y, Z, Autofocus offset
YST_1511_001,-10618,-1675,2716.900040484965,118.7
YST_1511_002,-10618,-1914,2717.2250404898077,122.45
YST_1511_003,-10367,-1695,2718.2500405050814,120.95
YST_1511_004,-10367,-1937,2718.8250405136496,120.95
YST_1511_005,-10092,-1757,2719.975040530786,119.45
group: YST_1511 field: time
start: 0
interval: 300
frames: 180
group: YST_1511 field: config
brightfield1: 0xfffffffffffffffffffffffffffffffffffffffffffff
pHluorin405_0_4: 0xfffffffffffffffffffffffffffffffffffffffffffff
pHluorin488_0_4: 0xfffffffffffffffffffffffffffffffffffffffffffff
cy5: 0xfffffffffffffffffffffffffffffffffffffffffffff
2022-10-10 15:31:27,362 - INFO
group: YST_1512 field: position
Name,X,Y,Z,Autofocus offset
YST_1512_001,-8173,-2510,2339.0750348549336,115.65
YST_1512_002,-8173,-2718,2338.0250348392874,110.8
YST_1512_003,-8173,-2963,2336.625034818426,110.8
YST_1512_004,-8457,-2963,2336.350034814328,110.9
YST_1512_005,-8481,-2706,2337.575034832582,113.3
group: YST_1512 field: time
start: 0
interval: 300
frames: 180
group: YST_1512 field: config
brightfield1: 0xfffffffffffffffffffffffffffffffffffffffffffff
pHluorin405_0_4: 0xfffffffffffffffffffffffffffffffffffffffffffff
pHluorin488_0_4: 0xfffffffffffffffffffffffffffffffffffffffffffff
cy5: 0xfffffffffffffffffffffffffffffffffffffffffffff
2022-10-10 15:31:27,365 - INFO
group: YST_1513 field: position
Name,X,Y,Z,Autofocus offset
YST_1513_001,-6978,-2596,2339.8750348668545,113.3
YST_1513_002,-6978,-2380,2340.500034876168,113.3
YST_1513_003,-6971,-2163,2340.8750348817557,113.3
YST_1513_004,-6971,-1892,2341.2500348873436,113.3
YST_1513_005,-6692,-1892,2341.550034891814,113.3
group: YST_1513 field: time
start: 0
interval: 300
frames: 180
group: YST_1513 field: config
brightfield1: 0xfffffffffffffffffffffffffffffffffffffffffffff
pHluorin405_0_4: 0xfffffffffffffffffffffffffffffffffffffffffffff
pHluorin488_0_4: 0xfffffffffffffffffffffffffffffffffffffffffffff
cy5: 0xfffffffffffffffffffffffffffffffffffffffffffff
2022-10-10 15:31:27,365 - INFO
2022-10-10 15:31:27,365 - INFO
-----Experiment started-----
"""
Base functions to extract information from a single cell
These functions are automatically read by extractor.py, and so can only have the cell_mask and trap_image as inputs and must return only one value.
"""
import numpy as np
from scipy import ndimage
from sklearn.cluster import KMeans

def area(cell_mask):
    """
    Find the area of a cell mask.

    Parameters
    ----------
    cell_mask: 2d array
        Segmentation mask for the cell
    """
    return np.sum(cell_mask, dtype=int)


def eccentricity(cell_mask):
    """
    Find the eccentricity using the approximate major and minor axes.

    Parameters
    ----------
    cell_mask: 2d array
        Segmentation mask for the cell
    """
    min_ax, maj_ax = min_maj_approximation(cell_mask)
    return np.sqrt(maj_ax**2 - min_ax**2) / maj_ax


def mean(cell_mask, trap_image):
    """
    Finds the mean of the pixels in the cell.

    Parameters
    ----------
    cell_mask: 2d array
        Segmentation mask for the cell
    trap_image: 2d array
    """
    return np.mean(trap_image[np.where(cell_mask)], dtype=float)


def median(cell_mask, trap_image):
    """
    Finds the median of the pixels in the cell.

    Parameters
    ----------
    cell_mask: 2d array
        Segmentation mask for the cell
    trap_image: 2d array
    """
    return np.median(trap_image[np.where(cell_mask)])

def max2p5pc(cell_mask, trap_image):
    """
    Finds the mean of the brightest 2.5% of pixels in the cell.

    Parameters
    ----------
    cell_mask: 2d array
        Segmentation mask for the cell
    trap_image: 2d array
    """
    # number of pixels in mask
    npixels = cell_mask.sum()
    top_pixels = int(np.ceil(npixels * 0.025))
    # sort pixels in cell
    sorted_vals = np.sort(trap_image[np.where(cell_mask)], axis=None)
    # find highest 2.5%
    top_vals = sorted_vals[-top_pixels:]
    # find mean of these highest pixels
    max2p5pc = np.mean(top_vals, dtype=float)
    return max2p5pc


def max5px(cell_mask, trap_image):
    """
    Finds the mean of the five brightest pixels in the cell.

    Parameters
    ----------
    cell_mask: 2d array
        Segmentation mask for the cell
    trap_image: 2d array
    """
    # sort pixels in cell
    sorted_vals = np.sort(trap_image[np.where(cell_mask)], axis=None)
    top_vals = sorted_vals[-5:]
    # find mean of five brightest pixels
    max5px = np.mean(top_vals, dtype=float)
    return max5px


def max5px_med(cell_mask, trap_image):
    """
    Finds the mean of the five brightest pixels in the cell divided by the median pixel value.

    Parameters
    ----------
    cell_mask: 2d array
        Segmentation mask for the cell
    trap_image: 2d array
    """
    # sort pixels in cell
    sorted_vals = np.sort(trap_image[np.where(cell_mask)], axis=None)
    top_vals = sorted_vals[-5:]
    # find mean of five brightest pixels
    max5px = np.mean(top_vals, dtype=float)
    # find the median
    med = np.median(sorted_vals)
    if med == 0:
        return np.nan
    else:
        return max5px / med


def max2p5pc_med(cell_mask, trap_image):
    """
    Finds the mean of the brightest 2.5% of pixels in the cell
    divided by the median pixel value.

    Parameters
    ----------
    cell_mask: 2d array
        Segmentation mask for the cell
    trap_image: 2d array
    """
    # number of pixels in mask
    npixels = cell_mask.sum()
    top_pixels = int(np.ceil(npixels * 0.025))
    # sort pixels in cell
    sorted_vals = np.sort(trap_image[np.where(cell_mask)], axis=None)
    # find highest 2.5%
    top_vals = sorted_vals[-top_pixels:]
    # find mean of these highest pixels
    max2p5pc = np.mean(top_vals, dtype=float)
    med = np.median(sorted_vals)
    if med == 0:
        return np.nan
    else:
        return max2p5pc / med


def std(cell_mask, trap_image):
    """
    Finds the standard deviation of the values of the pixels in the cell.

    Parameters
    ----------
    cell_mask: 2d array
        Segmentation mask for the cell
    trap_image: 2d array
    """
    return np.std(trap_image[np.where(cell_mask)], dtype=float)

def k2_major_median(cell_mask, trap_image):
    """
    Finds the median of the major cluster after clustering the pixels in the cell into two clusters.

    Parameters
    ----------
    cell_mask: 2d array
        Segmentation mask for the cell
    trap_image: 2d array

    Returns
    -------
    median: float
        The median of the major cluster of the two clusters
    """
    if np.any(cell_mask):
        X = trap_image[np.where(cell_mask)].reshape(-1, 1)
        # cluster pixels in cell into two clusters
        kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
        high_clust_id = kmeans.cluster_centers_.argmax()
        # find the median of pixels in the cluster with the brighter centre
        major_cluster = X[kmeans.predict(X) == high_clust_id]
        major_median = np.median(major_cluster, axis=None)
        return major_median
    else:
        return np.nan


def k2_minor_median(cell_mask, trap_image):
    """
    Finds the median of the minor cluster after clustering the pixels in the cell into two clusters.

    Parameters
    ----------
    cell_mask: 2d array
        Segmentation mask for the cell
    trap_image: 2d array

    Returns
    -------
    median: float
        The median of the minor cluster.
    """
    if np.any(cell_mask):
        X = trap_image[np.where(cell_mask)].reshape(-1, 1)
        # cluster pixels in cell into two clusters
        kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
        low_clust_id = kmeans.cluster_centers_.argmin()
        # find the median of pixels in the cluster with the dimmer centre
        minor_cluster = X[kmeans.predict(X) == low_clust_id]
        minor_median = np.median(minor_cluster, axis=None)
        return minor_median
    else:
        return np.nan

def volume(cell_mask):
    """
    Estimates the volume of the cell assuming it is an ellipsoid with the mask providing a cross-section through the median plane of the ellipsoid.

    Parameters
    ----------
    cell_mask: 2d array
        Segmentation mask for the cell
    """
    min_ax, maj_ax = min_maj_approximation(cell_mask)
    return (4 * np.pi * min_ax**2 * maj_ax) / 3


def conical_volume(cell_mask):
    """
    Estimates the volume of the cell.

    Parameters
    ----------
    cell_mask: 2D array
        Segmentation mask for the cell
    """
    padded = np.pad(cell_mask, 1, mode="constant", constant_values=0)
    nearest_neighbor = (
        ndimage.morphology.distance_transform_edt(padded == 1) * padded
    )
    return 4 * (nearest_neighbor.sum())


def spherical_volume(cell_mask):
    """
    Estimates the volume of the cell assuming it is a sphere with the mask providing a cross-section through the median plane of the sphere.

    Parameters
    ----------
    cell_mask: 2d array
        Segmentation mask for the cell
    """
    area = cell_mask.sum()
    r = np.sqrt(area / np.pi)
    return (4 * np.pi * r**3) / 3

def min_maj_approximation(cell_mask):
    """
    Finds the lengths of the minor and major axes of an ellipse from a cell mask.

    Parameters
    ----------
    cell_mask: 2d array
        Segmentation mask for the cell
    """
    padded = np.pad(cell_mask, 1, mode="constant", constant_values=0)
    nn = ndimage.morphology.distance_transform_edt(padded == 1) * padded
    dn = ndimage.morphology.distance_transform_edt(nn - nn.max()) * padded
    cone_top = ndimage.morphology.distance_transform_edt(dn == 0) * padded
    min_ax = np.round(nn.max())
    maj_ax = np.round(dn.max() + cone_top.sum() / 2)
    return min_ax, maj_ax
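
As the module docstring notes, extractor.py picks these functions up automatically, so each must take only cell_mask (and optionally trap_image) and return a single value. A minimal sketch of what calling them looks like; the toy mask and image below are invented purely for illustration:

import numpy as np

# hypothetical 5x5 tile with a single bright 3x3 "cell" in the centre
trap_image = np.zeros((5, 5))
trap_image[1:4, 1:4] = 100.0
cell_mask = trap_image > 0

print(area(cell_mask))                # 9 (pixels in the mask)
print(mean(cell_mask, trap_image))    # 100.0
print(max5px(cell_mask, trap_image))  # 100.0, mean of the five brightest pixels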
# File with defaults for ease of use
from pathlib import PosixPath
from typing import Union
import h5py

def exparams_from_meta(meta: Union[dict, PosixPath, str], extras=["ph"]):
    """
    Obtain parameters from metadata of hdf5 file.
    """
    meta = meta if isinstance(meta, dict) else load_attributes(meta)
    base = {
        "tree": {"general": {"None": ["area", "volume", "eccentricity"]}},
        "multichannel_ops": {},
    }
    av_channels = {
        "Citrine",
        "GFP",
        "GFPFast",
        "mCherry",
        "pHluorin405",
        "Flavin",
        "Cy5",
        "mKO2",
    }
    default_reductions = {"np_max"}
    default_metrics = {
        "mean",
        "median",
        "imBackground",
        "background_max5",
        "max2p5pc",
        "max2p5pc_med",
        "max5px",
        "max5px_med",
        # "nuc_est_conv",
    }
    default_rm = {r: default_metrics for r in default_reductions}
    # default_rm["None"] = ["nuc_conv_3d"]
    av_flch = av_channels.intersection(meta["channels/channel"]).difference(
        {"Brightfield", "DIC", "BrightfieldGFP"}
    )
    for ch in av_flch:
        base["tree"][ch] = default_rm
    base["sub_bg"] = av_flch
    # Additional extraction defaults when channels available
    if "ph" in extras:
        if {"pHluorin405", "GFPFast"}.issubset(av_flch):
            sets = {
                b + a: (x, y)
                for a, x in zip(
                    ["", "_bgsub"],
                    (
                        ["GFPFast", "pHluorin405"],
                        ["GFPFast_bgsub", "pHluorin405_bgsub"],
                    ),
                )
                for b, y in zip(["em_ratio", "gsum"], ["div0", "np_add"])
            }
            for i, v in sets.items():
                base["multichannel_ops"][i] = [
                    *v,
                    default_rm,
                ]
    return base


def load_attributes(file: str, group="/"):
    with h5py.File(file, "r") as f:
        meta = dict(f[group].attrs.items())
    return meta
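
A hedged usage sketch of exparams_from_meta: the metadata dict below is a stand-in for the HDF5 attributes that load_attributes would normally return, and its channel list is invented for illustration.

# hypothetical metadata with the "channels/channel" key the function expects
meta = {"channels/channel": ["Brightfield", "GFPFast", "pHluorin405"]}

params = exparams_from_meta(meta)
print(sorted(params["tree"]))    # ['GFPFast', 'general', 'pHluorin405']
print(sorted(params["sub_bg"]))  # ['GFPFast', 'pHluorin405']
print(sorted(params["multichannel_ops"]))
# ['em_ratio', 'em_ratio_bgsub', 'gsum', 'gsum_bgsub']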
from yaml import dump, load

def dict_to_yaml(d, f):
    with open(f, "w") as f:
        dump(d, f)


def add_attrs(hdfile, path, files):
    group = hdfile.create_group(path)
    for k, v in files:
        group.attrs[k] = v
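
A small usage sketch of add_attrs, assuming an open h5py file handle; the file name and attribute values here are hypothetical:

import h5py

with h5py.File("example.h5", "w") as f:  # hypothetical file
    # store key/value pairs as attributes of a new "metadata" group
    add_attrs(f, "metadata", [("microscope", "Batman"), ("interval", 300)])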
from collections import deque

def depth(d):
    """
    Copied from https://stackoverflow.com/a/23499088

    Used to determine the depth of our config trees and fill them.
    """
    queue = deque([(id(d), d, 1)])
    memo = set()
    while queue:
        id_, o, level = queue.popleft()
        if id_ in memo:
            continue
        memo.add(id_)
        if isinstance(o, dict):
            queue += ((id(v), v, level + 1) for v in o.values())
    return level
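
For illustration, depth counts nesting levels including the leaves (these toy dicts are not from the codebase):

print(depth(5))                # 1: a non-dict is a single level
print(depth({"a": 1}))         # 2: the dict plus its leaf value
print(depth({"a": {"b": 1}}))  # 3: two nested dicts plus the leaf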