Skip to content

Commit c978b29

Browse files
committed
[skip ci] test run with gastruloid data
1 parent e4fb149 commit c978b29

File tree

3 files changed

+104
-84
lines changed

3 files changed

+104
-84
lines changed

cellpose/contrib/cluster_script.py

Lines changed: 99 additions & 81 deletions
Original file line number · Diff line number · Diff line change
@@ -2,90 +2,108 @@
22
import subprocess
33
import pickle
44

5-
import imageio
65
import zarr
7-
import numpy
8-
from tifffile import imwrite
6+
from tifffile import imread
7+
from pooch import retrieve
98
from cellpose.contrib.distributed_segmentation import numpy_array_to_zarr
109

1110
from cellpose.contrib.distributed_segmentation import distributed_eval
12-
from cellpose.contrib.distributed_segmentation import SlurmCluster, janeliaLSFCluster
13-
14-
## PARAMETERS
15-
# Compute node accessible directory for test input zarr dataset and outputs
16-
output_dir = Path.home() / 'link_scratch'
17-
18-
# Cluster parameters (here: https://docs.mpcdf.mpg.de/doc/computing/viper-gpu-user-guide.html)
19-
cluster_kwargs = {
20-
'job_cpu': 2, # number of CPUs per GPU worker
21-
'ncpus':1, # threads requested per GPU worker
22-
'min_workers':1, # min number of workers based on expected workload
23-
'max_workers':16, # max number of workers based on expected workload
24-
'walltime': '1:00:00', # available runtime for each GPU worker for cluster scheduler (Slurm, LSF)
25-
'queue': 'apu', # queue/ partition name for single GPU worker *
26-
'interface': 'ib0', # interface name for compute-node communication *
27-
'local_directory': '/tmp', # compute node local temporary directory *
28-
'job_extra_directives': [ # extra directives for scheduler (here: Slurm) *
29-
'--constraint apu',
30-
'--gres gpu:1',
31-
],
32-
}
33-
# * Ask your cluster support staff for assistance
34-
35-
input_zarr_path = output_dir / 'input.zarr'
36-
output_zarr_path = output_dir / 'segmentation.zarr'
37-
output_bbox_pkl = output_dir / 'bboxes.pkl'
38-
39-
if not input_zarr_path.exists():
40-
print('Download (1024 x 1024 x 1024) test data')
41-
crop = (slice(0,1), slice(2048,3072), slice(2048,3072), slice(0,1024))
42-
data_numpy = zarr.open("https://webknossos-data.mpinb.mpg.de/data/zarr/653bd498010000ae005914a1/color/16-16-2", mode='r')[crop]
43-
44-
print('Save as 3D local zarr array')
45-
data_zarr = numpy_array_to_zarr(input_zarr_path, data_numpy.squeeze(0), chunks=(256, 256, 256))
46-
del data_numpy
47-
else:
48-
data_zarr = zarr.open(input_zarr_path)
49-
50-
# parameterize cellpose however you like
51-
model_kwargs = {'gpu':True}
52-
eval_kwargs = {
53-
'z_axis':0,
54-
'do_3D':True,
55-
}
56-
57-
# Guess cluster type by checking for cluster submission commands
58-
if subprocess.getstatusoutput('sbatch -h')[0] == 0:
59-
print('Slurm sbatch command detected -> use SlurmCluster')
60-
cluster = SlurmCluster(**cluster_kwargs)
61-
elif subprocess.getstatusoutput('bsub -h')[0] == 0:
62-
print('LSF bsub command detected -> use janeliaLSFCLuster')
63-
cluster = janeliaLSFCluster(**cluster_kwargs)
64-
else:
65-
cluster = None
66-
67-
if cluster is None:
68-
raise Exception(
69-
"Neither SLURM nor LFS cluster detected. "
70-
"Currently, this script only supports SLURM or LSF cluster scheduler. "
71-
"You have two options:"
72-
"\n * Either use `distributed_eval` without the `cluster` but with the `cluster_kwargs` argument to start a local cluster on your machine"
73-
"\n * or raise a feature request at https://github.com/MouseLand/cellpose/issues."
11+
from cellpose.contrib.distributed_segmentation import SlurmCluster, janeliaLSFCluster, myLocalCluster
12+
13+
14+
15+
def main():
16+
## PARAMETERS
17+
# Compute node accessible directory for test input zarr dataset and outputs
18+
output_dir = Path.home() / 'link_scratch'
19+
20+
# Cluster parameters (here: https://docs.mpcdf.mpg.de/doc/computing/viper-gpu-user-guide.html)
21+
cluster = {
22+
'job_cpu': 2, # number of CPUs per GPU worker
23+
'ncpus':1, # threads requested per GPU worker
24+
'min_workers':1, # min number of workers based on expected workload
25+
'max_workers':16, # max number of workers based on expected workload
26+
'walltime': '1:00:00', # available runtime for each GPU worker for cluster scheduler (Slurm, LSF)
27+
'queue': 'apu', # queue/ partition name for single GPU worker *
28+
'interface': 'ib0', # interface name for compute-node communication *
29+
'local_directory': '/tmp', # compute node local temporary directory *
30+
'job_extra_directives': [ # extra directives for scheduler (here: Slurm) *
31+
'--constraint apu',
32+
'--gres gpu:1',
33+
],
34+
}
35+
# * Ask your cluster support staff for assistance
36+
37+
input_zarr_path = output_dir / 'input.zarr'
38+
output_zarr_path = output_dir / 'segmentation.zarr'
39+
output_bbox_pkl = output_dir / 'bboxes.pkl'
40+
41+
42+
if not input_zarr_path.exists():
43+
print('Download test data')
44+
fname = retrieve(
45+
url="https://zenodo.org/records/17590053/files/2d_gastruloid.tif?download=1",
46+
known_hash="8ac2d944882268fbaebdfae5f7c18e4d20fdab024db2f9f02f4f45134b936872",
47+
path = Path.home() / '.cellpose' / 'data',
48+
progressbar=True,
49+
)
50+
#crop = (slice(None), slice(1024,2048), slice(1024,2048))
51+
data_numpy = imread(fname)#[crop]
52+
53+
print('Save as 3D local zarr array')
54+
data_zarr = numpy_array_to_zarr(input_zarr_path, data_numpy, chunks=(256, 256, 256))
55+
del data_numpy
56+
else:
57+
data_zarr = zarr.open(input_zarr_path)
58+
59+
# parameterize cellpose however you like
60+
model_kwargs = {'gpu':True}
61+
eval_kwargs = {
62+
'z_axis':0,
63+
'do_3D':True,
64+
}
65+
66+
# Guess cluster type by checking for cluster submission commands
67+
if subprocess.getstatusoutput('sbatch -h')[0] == 0:
68+
print('Slurm sbatch command detected -> use SlurmCluster')
69+
cluster = SlurmCluster(**cluster_kwargs)
70+
elif subprocess.getstatusoutput('bsub -h')[0] == 0:
71+
print('LSF bsub command detected -> use janeliaLSFCLuster')
72+
cluster = janeliaLSFCluster(**cluster_kwargs)
73+
else:
74+
cluster = None
75+
#cluster = myLocalCluster(**{
76+
#'n_workers':1, # if you only have 1 gpu, then 1 worker is the right choice
77+
#'ncpus':24,
78+
#'memory_limit':'64GB',
79+
#'threads_per_worker':1,
80+
#})
81+
82+
if cluster is None:
83+
raise Exception(
84+
"Neither SLURM nor LFS cluster detected. "
85+
"Currently, this script only supports SLURM or LSF cluster scheduler. "
86+
"You have two options:"
87+
"\n * Either use `distributed_eval` without the `cluster` but with the `cluster_kwargs` argument to start a local cluster on your machine"
88+
"\n * or raise a feature request at https://github.com/MouseLand/cellpose/issues."
89+
)
90+
91+
# Start evaluation
92+
segments, boxes = distributed_eval(
93+
input_zarr=data_zarr,
94+
blocksize=(256, 256, 256),
95+
write_path=str(output_zarr_path),
96+
model_kwargs=model_kwargs,
97+
eval_kwargs=eval_kwargs,
98+
cluster = cluster,
7499
)
75100

76-
# Start evaluation
77-
segments, boxes = distributed_eval(
78-
input_zarr=data_zarr,
79-
blocksize=(256, 256, 256),
80-
write_path=str(output_zarr_path),
81-
model_kwargs=model_kwargs,
82-
eval_kwargs=eval_kwargs,
83-
cluster = cluster,
84-
)
85-
86-
# Save boxes on disk
87-
with open(output_bbox_pkl, 'wb') as f:
88-
pickle.dump(boxes, f)
89-
90-
print(f'Segmentation saved in {str(output_zarr_path)}')
91-
print(f'Object boxes saved in {str(output_bbox_pkl)}')
101+
# Save boxes on disk
102+
with open(output_bbox_pkl, 'wb') as f:
103+
pickle.dump(boxes, f)
104+
105+
print(f'Segmentation saved in {str(output_zarr_path)}')
106+
print(f'Object boxes saved in {str(output_bbox_pkl)}')
107+
108+
if __name__ == '__main__':
109+
main()

environment-rocm.yml

Lines changed: 1 addition & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -28,5 +28,4 @@ dependencies:
2828
- dask_jobqueue
2929
- bokeh
3030
- fill-voids
31-
- aiohttp
32-
31+
- pooch

environment.yml

Lines changed: 4 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -1,8 +1,10 @@
11
name: cellpose
22
dependencies:
3-
- python==3.8.5
3+
- python==3.9.23
44
- pip
55
- pip:
6+
- --index-url https://download.pytorch.org/whl/cu126
7+
- --extra-index-url https://pypi.org/simple
68
- qtpy
79
# - PyQt5.sip
810
- numpy>=1.20.0
@@ -26,4 +28,5 @@ dependencies:
2628
- dask_jobqueue
2729
- bokeh
2830
- fill-voids
31+
- pooch
2932

0 commit comments

Comments (0)