Skip to content

Commit 35d6a24

Browse files
authored
Merge branch 'master' into sylph
2 parents bdc5e02 + cefdfdc commit 35d6a24

File tree

97 files changed

+4926
-641
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

97 files changed

+4926
-641
lines changed

.github/workflows/pr.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -177,7 +177,7 @@ jobs:
177177
runs-on: ${{ matrix.os }}
178178
strategy:
179179
matrix:
180-
os: [ubuntu-20.04]
180+
os: [ubuntu-24.04]
181181
r-version: ['release']
182182
steps:
183183
- uses: actions/checkout@v4

data_managers/data_manager_motus_db_downloader/tool-data/motus_db_versioned.loc.sample

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,4 +6,4 @@
66

77
# for example:
88

9-
db_from_2024-07-11T081301Z 3.1.0 mOTUs DB version 3.1.0 downloaded at 2024-07-11 08:13:01.698939 /galaxy/tool-data/motus_database/db_from_2024-07-11T081301Z/db_mOTU
9+
#db_from_2024-07-11T081301Z 3.1.0 mOTUs DB version 3.1.0 downloaded at 2024-07-11 08:13:01.698939 /galaxy/tool-data/motus_database/db_from_2024-07-11T081301Z/db_mOTU

tools/bioimaging/bioimage_inference.xml

Lines changed: 100 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
<description>with PyTorch</description>
33
<macros>
44
<token name="@TOOL_VERSION@">2.4.1</token>
5-
<token name="@VERSION_SUFFIX@">0</token>
5+
<token name="@VERSION_SUFFIX@">1</token>
66
</macros>
77
<creator>
88
<organization name="European Galaxy Team" url="https://galaxyproject.org/eu/" />
@@ -30,12 +30,18 @@
3030
--imaging_model '$input_imaging_model'
3131
--image_file '$input_image_file'
3232
--image_size '$input_image_input_size'
33+
--image_axes '$input_image_input_axes'
3334
]]>
3435
</command>
3536
<inputs>
3637
<param name="input_imaging_model" type="data" format="zip" label="BioImage.IO model" help="Please upload a BioImage.IO model."/>
3738
<param name="input_image_file" type="data" format="tiff,png" label="Input image" help="Please provide an input image for the analysis."/>
38-
<param name="input_image_input_size" type="text" label="Size of the input image" help="Provide the size of the input image. See the chosen model's RDF file to find the correct input size. For example: for the BioImage.IO model MitochondriaEMSegmentationBoundaryModel, the input size is 256 x 256 x 32 x 1. Enter the size as 256,256,32,1."/>
39+
<param name="input_image_input_size" type="text" optional="false" label="Size of the input image" help="Provide the size of the input image. See the chosen model's RDF file to find the correct input size. For example: for the BioImage.IO model MitochondriaEMSegmentationBoundaryModel, the input size is 256 x 256 x 32 x 1. Enter the size as 256,256,32,1."/>
40+
<param name="input_image_input_axes" type="select" label="Axes of the input image" optional="false" help="Provide the input axes of the input image. See the chosen model's RDF file to find the correct axes. For example: for the BioImage.IO model MitochondriaEMSegmentationBoundaryModel, the input axes is 'bczyx'">
41+
<option value="bczyx">bczyx</option>
42+
<option value="bcyx">bcyx</option>
43+
<option value="byxc">byxc</option>
44+
</param>
3945
</inputs>
4046
<outputs>
4147
<data format="tif" name="output_predicted_image" from_work_dir="output_predicted_image.tif" label="Predicted image"></data>
@@ -46,15 +52,97 @@
4652
<param name="input_imaging_model" value="input_imaging_model.zip" location="https://zenodo.org/api/records/6647674/files/weights-torchscript.pt/content"/>
4753
<param name="input_image_file" value="input_image_file.tif" location="https://zenodo.org/api/records/6647674/files/sample_input_0.tif/content"/>
4854
<param name="input_image_input_size" value="256,256,1,1"/>
49-
<output name="output_predicted_image" file="output_nucleisegboundarymodel.tif" compare="sim_size" delta="100" />
50-
<output name="output_predicted_image_matrix" file="output_nucleisegboundarymodel_matrix.npy" compare="sim_size" delta="100" />
55+
<param name="input_image_input_axes" value="bcyx"/>
56+
<output name="output_predicted_image" ftype="tif">
57+
<assert_contents>
58+
<has_size size="524846" delta="110" />
59+
</assert_contents>
60+
</output>
61+
<output name="output_predicted_image_matrix" ftype="npy">
62+
<assert_contents>
63+
<has_size size="524416" delta="110" />
64+
</assert_contents>
65+
</output>
5166
</test>
5267
<test>
5368
<param name="input_imaging_model" value="input_imaging_model.zip" location="https://zenodo.org/api/records/6647674/files/weights-torchscript.pt/content"/>
5469
<param name="input_image_file" value="input_nucleisegboundarymodel.png"/>
5570
<param name="input_image_input_size" value="256,256,1,1"/>
56-
<output name="output_predicted_image" file="output_nucleisegboundarymodel.tif" compare="sim_size" delta="100" />
57-
<output name="output_predicted_image_matrix" file="output_nucleisegboundarymodel_matrix.npy" compare="sim_size" delta="100" />
71+
<param name="input_image_input_axes" value="bcyx"/>
72+
<output name="output_predicted_image" ftype="tif">
73+
<assert_contents>
74+
<has_size size="524846" delta="110" />
75+
</assert_contents>
76+
</output>
77+
<output name="output_predicted_image_matrix" ftype="npy">
78+
<assert_contents>
79+
<has_size size="524416" delta="110" />
80+
</assert_contents>
81+
</output>
82+
</test>
83+
<test>
84+
<param name="input_imaging_model" value="input_imaging_model.zip" location="https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/emotional-cricket/1.1/files/torchscript_tracing.pt"/>
85+
<param name="input_image_file" value="input_image_file.tif" location="https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/emotional-cricket/1.1/files/sample_input_0.tif"/>
86+
<param name="input_image_input_size" value="128,128,100,1"/>
87+
<param name="input_image_input_axes" value="bczyx"/>
88+
<output name="output_predicted_image" ftype="tif">
89+
<assert_contents>
90+
<has_size size="6572778" delta="100" />
91+
</assert_contents>
92+
</output>
93+
<output name="output_predicted_image_matrix" ftype="npy">
94+
<assert_contents>
95+
<has_size size="6572778" delta="100" />
96+
</assert_contents>
97+
</output>
98+
</test>
99+
<test>
100+
<param name="input_imaging_model" value="input_imaging_model.zip" location="https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/emotional-cricket/1.1/files/torchscript_tracing.pt"/>
101+
<param name="input_image_file" value="input_3d-unet-arabidopsis-apical-stem-cells.png" location="https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/emotional-cricket/1.1/files/raw.png"/>
102+
<param name="input_image_input_size" value="128,128,100,1"/>
103+
<param name="input_image_input_axes" value="bczyx"/>
104+
<output name="output_predicted_image" ftype="tif">
105+
<assert_contents>
106+
<has_size size="6572778" delta="100" />
107+
</assert_contents>
108+
</output>
109+
<output name="output_predicted_image_matrix" ftype="npy">
110+
<assert_contents>
111+
<has_size size="6572778" delta="100" />
112+
</assert_contents>
113+
</output>
114+
</test>
115+
<test>
116+
<param name="input_imaging_model" value="input_imaging_model.zip" location="https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/organized-badger/1/files/weights-torchscript.pt"/>
117+
<param name="input_image_file" value="input_platynereisemnucleisegmentationboundarymodel.tif" location="https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/organized-badger/1/files/sample_input_0.tif"/>
118+
<param name="input_image_input_size" value="256,256,32,1"/>
119+
<param name="input_image_input_axes" value="bczyx"/>
120+
<output name="output_predicted_image" ftype="tif">
121+
<assert_contents>
122+
<has_size size="16789714" delta="100" />
123+
</assert_contents>
124+
</output>
125+
<output name="output_predicted_image_matrix" ftype="npy">
126+
<assert_contents>
127+
<has_size size="16777344" delta="100" />
128+
</assert_contents>
129+
</output>
130+
</test>
131+
<test>
132+
<param name="input_imaging_model" value="input_imaging_model.zip" location="https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/thoughtful-turtle/1/files/torchscript_tracing.pt"/>
133+
<param name="input_image_file" value="input_3d-unet-lateral-root-primordia-cells.tif" location="https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/thoughtful-turtle/1/files/sample_input_0.tif"/>
134+
<param name="input_image_input_size" value="128,128,100,1"/>
135+
<param name="input_image_input_axes" value="bczyx"/>
136+
<output name="output_predicted_image" ftype="tif">
137+
<assert_contents>
138+
<has_size size="6572778" delta="100" />
139+
</assert_contents>
140+
</output>
141+
<output name="output_predicted_image_matrix" ftype="npy">
142+
<assert_contents>
143+
<has_size size="6553728" delta="100" />
144+
</assert_contents>
145+
</output>
58146
</test>
59147
</tests>
60148
<help>
@@ -64,13 +152,14 @@
64152
The tool takes a BioImage.IO model and an image (as TIF or PNG) to be analyzed. The analysis is performed by the model. The model is used to obtain a prediction of the result of the analysis, and the predicted image becomes available as a TIF file in the Galaxy history.
65153
66154
**Input files**
67-
- BioImage.IO model: Add one of the model from Galaxy file uploader by choosing a "remote" file at "ML Models/bioimaging-models"
68-
- Image to be analyzed: Provide an image as TIF/PNG file
69-
- Provide the necessary input size for the model. This information can be found in the RDF file of each model (RDF file > config > test_information > inputs > size)
155+
- BioImage.IO model: Add one of the model from Galaxy file uploader by choosing a "remote" file at "ML Models/bioimaging-models"
156+
- Image to be analyzed: Provide an image as TIF/PNG file
157+
- Provide the necessary input size for the model. This information can be found in the RDF file of each model (RDF file > config > test_information > inputs > size)
158+
- Provide axes of input image. This information can also be found in the RDF file of each model (RDF file > inputs > axes). An example value of axes is 'bczyx' for 3D U-Net Arabidopsis Lateral Root Primordia model
70159
71160
**Output files**
72-
- Predicted image: Predicted image using the BioImage.IO model
73-
- Predicted image matrix: Predicted image matrix in original dimensions
161+
- Predicted image: Predicted image using the BioImage.IO model
162+
- Predicted image matrix: Predicted image matrix in original dimensions
74163
]]>
75164
</help>
76165
<citations>

tools/bioimaging/main.py

Lines changed: 99 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -7,70 +7,128 @@
77
import imageio
88
import numpy as np
99
import torch
10+
import torch.nn.functional as F
1011

1112

12-
def dynamic_resize(image: torch.Tensor, target_shape: tuple) -> torch.Tensor:
    """
    Resize an input tensor dynamically to the target shape.

    Parameters:
    - image: Input tensor with shape (C, D1, D2, ..., DN) (any number of spatial dims)
    - target_shape: Tuple specifying the target shape (C', D1', D2', ..., DN')

    Returns:
    - Resized tensor with shape target_shape.

    Raises:
    - ValueError: if target_shape does not have the same number of dimensions
      as the input image.
    """
    input_shape = image.shape
    num_dims = len(input_shape)  # channel dim + spatial dims

    # Ensure target shape matches the number of dimensions
    if len(target_shape) != num_dims:
        raise ValueError(
            f"Target shape {target_shape} must match input dimensions {num_dims}"
        )

    # First element is the target channel count; the rest are spatial sizes.
    target_channels = target_shape[0]
    target_spatial_size = target_shape[1:]

    # Add a batch dimension (N=1) because F.interpolate expects (N, C, ...).
    image = image.unsqueeze(0)

    # Pick the interpolation mode matching the tensor rank AFTER unsqueeze:
    #   num_dims == 4 -> 5D tensor -> trilinear
    #   num_dims == 3 -> 4D tensor -> bilinear
    #   num_dims == 2 -> 3D tensor -> linear (bicubic requires a 4D tensor and
    #                    would raise here)
    if num_dims == 4:
        interp_mode = "trilinear"
    elif num_dims == 3:
        interp_mode = "bilinear"
    elif num_dims == 2:
        interp_mode = "linear"
    else:
        interp_mode = "nearest"

    # align_corners is only valid for the (bi/tri)linear family; passing it
    # alongside mode="nearest" raises a ValueError in PyTorch, so use None there.
    align = False if interp_mode != "nearest" else None

    # Resize spatial dimensions dynamically
    image = F.interpolate(
        image, size=target_spatial_size, mode=interp_mode, align_corners=align
    )

    # Adjust channels if necessary
    current_channels = image.shape[1]

    if target_channels > current_channels:
        # Expand channels by tiling the existing ones.
        expand_factor = target_channels // current_channels
        remainder = target_channels % current_channels
        image = image.repeat(1, expand_factor, *[1] * (num_dims - 1))

        if remainder > 0:
            # Append the first few channels again to reach the exact count.
            extra_channels = image[:, :remainder, ...]
            image = torch.cat([image, extra_channels], dim=1)

    elif target_channels < current_channels:
        # Reduce channels by slicing off the extras.
        image = image[:, :target_channels, ...]

    return image.squeeze(0)  # Remove batch dimension before returning
2975

3076

3177
if __name__ == "__main__":
3278
arg_parser = argparse.ArgumentParser()
33-
arg_parser.add_argument("-im", "--imaging_model", required=True, help="Input BioImage model")
34-
arg_parser.add_argument("-ii", "--image_file", required=True, help="Input image file")
35-
arg_parser.add_argument("-is", "--image_size", required=True, help="Input image file's size")
79+
arg_parser.add_argument(
80+
"-im", "--imaging_model", required=True, help="Input BioImage model"
81+
)
82+
arg_parser.add_argument(
83+
"-ii", "--image_file", required=True, help="Input image file"
84+
)
85+
arg_parser.add_argument(
86+
"-is", "--image_size", required=True, help="Input image file's size"
87+
)
88+
arg_parser.add_argument(
89+
"-ia", "--image_axes", required=True, help="Input image file's axes"
90+
)
3691

3792
# get argument values
3893
args = vars(arg_parser.parse_args())
3994
model_path = args["imaging_model"]
4095
input_image_path = args["image_file"]
96+
input_size = args["image_size"]
4197

4298
# load all embedded images in TIF file
4399
test_data = imageio.v3.imread(input_image_path, index="...")
44-
test_data = np.squeeze(test_data)
45100
test_data = test_data.astype(np.float32)
101+
test_data = np.squeeze(test_data)
46102

47-
# assess the correct dimensions of TIF input image
48-
input_image_shape = args["image_size"]
49-
im_test_data, shape_vals = find_dim_order(input_image_shape, test_data)
103+
target_image_dim = input_size.split(",")[::-1]
104+
target_image_dim = [int(i) for i in target_image_dim if i != "1"]
105+
target_image_dim = tuple(target_image_dim)
106+
107+
exp_test_data = torch.tensor(test_data)
108+
# check if image dimensions are reversed
109+
reversed_order = list(reversed(range(exp_test_data.dim())))
110+
exp_test_data_T = exp_test_data.permute(*reversed_order)
111+
if exp_test_data_T.shape == target_image_dim:
112+
exp_test_data = exp_test_data_T
113+
if exp_test_data.shape != target_image_dim:
114+
for i in range(len(target_image_dim) - exp_test_data.dim()):
115+
exp_test_data = exp_test_data.unsqueeze(i)
116+
try:
117+
exp_test_data = dynamic_resize(exp_test_data, target_image_dim)
118+
except Exception as e:
119+
raise RuntimeError(f"Error during resizing: {e}") from e
120+
121+
current_dimension = len(exp_test_data.shape)
122+
input_axes = args["image_axes"]
123+
target_dimension = len(input_axes)
124+
# expand input image based on the number of target dimensions
125+
for i in range(target_dimension - current_dimension):
126+
exp_test_data = torch.unsqueeze(exp_test_data, i)
50127

51128
# load model
52129
model = torch.load(model_path)
53130
model.eval()
54131

55-
# find the number of dimensions required by the model
56-
target_dimension = 0
57-
for param in model.named_parameters():
58-
target_dimension = len(param[1].shape)
59-
break
60-
current_dimension = len(list(im_test_data.shape))
61-
62-
# update the dimensions of input image if the required image by
63-
# the model is smaller
64-
slices = tuple(slice(0, s_val) for s_val in shape_vals)
65-
66-
# apply the slices to the reshaped_input
67-
im_test_data = im_test_data[slices]
68-
exp_test_data = torch.tensor(im_test_data)
69-
70-
# expand input image's dimensions
71-
for i in range(target_dimension - current_dimension):
72-
exp_test_data = torch.unsqueeze(exp_test_data, i)
73-
74132
# make prediction
75133
pred_data = model(exp_test_data)
76134
pred_data_output = pred_data.detach().numpy()
-513 KB
Binary file not shown.
Binary file not shown.

0 commit comments

Comments
 (0)