# Install micromamba and the OMERO command-line tools:
# Run the micromamba installer in the current shell.
"${SHELL}" <(curl -L micro.mamba.pm/install.sh)
# Reload the shell config so the micromamba shell hook takes effect.
source ~/.bashrc
# Create an "import" environment with omero-py from the conda-forge/ome channels.
micromamba create -n import -c conda-forge -c ome omero-py
micromamba activate import
# Extras: pandas for the processing scripts, omero-rois for ROI creation,
# omero-cli-zarr for the "omero zarr" import plugin used below.
pip install pandas omero-rois omero-cli-zarr
# Create the study project plus one project per experiment.
# NOTE(review): "omero obj new Project" takes name= literally — the
# "/experimentA" suffix is part of the project name, not a hierarchy.
omero obj new Project name=idr0167-li-cellcyclenet
omero obj new Project name=idr0167-li-cellcyclenet/experimentA
omero obj new Project name=idr0167-li-cellcyclenet/experimentB
cd experimentA
# Create one dataset per entry in the filePaths table.
# Replace [PROJECT_ID] with the id of the experimentA project created above.
/uod/idr/metadata/idr-utils/scripts/create_datasets.sh [PROJECT_ID] idr0167-experimentA-filePaths.tsv >> dataset.out
# Extract the "Dataset:<id>" tokens for use by "parallel" below.  The second
# sed expression strips the leading whitespace left after the comma, so no
# manual cleanup of dataset.ids is needed anymore.
grep ", Dataset:" dataset.out | sed -e "s/.\+,//g" -e "s/^[[:space:]]*//" > dataset.ids
# Import images:
# Each TSV row is "<dataset name>\t<image name>\t<zarr url>"; import every
# image into its dataset, naming it after the image column.  The fields are
# quoted so names containing spaces or glob characters survive intact.
while IFS=$'\t' read -r dataset image url; do omero zarr import --target-by-name "$dataset" --name "$image" "$url"; done < "idr0167-experimentA-filePaths.tsv" >> import.log
# Check number of images:
# grep -c counts matching lines directly (no wc pipeline needed).
grep -c "Created Image" import.log # add 1
# Sanity-check rendering: generate a thumbnail for every dataset, 4 jobs at
# a time with a 3s start delay to avoid hammering the server.
# :::: reads one "Dataset:<id>" argument per line from dataset.ids.
parallel -j 4 --eta --progress --delay 3 omero render test --thumb :::: dataset.ids
# Then same for experimentB ...
cd ../script
# Create the ROIs for the imported images (uses omero-rois installed above).
python rois.py
# Run the post-processing script on every TSV under ../processed.
# NOTE(review): presumably this rewrites the tables with image/dataset ids —
# confirm against update_processed.py.
find ../processed -type f -name "*.tsv" -exec python update_processed.py {} \;
# Combine the csvs:
# Keep a single header row, then append the data rows of every matching file
# ("sed 1d" strips each file's own header; one sed per file via \; so every
# header is removed).  The >> redirection belongs to find's stdout, so it is
# placed after the command instead of between the -exec operands, where it
# only worked because the shell hoists redirections out of the command line.
head -1 10_masks_filtered.csv > experimentB_processed.csv
find * -regextype posix-extended -regex "[0-9]{1,2}.+\.csv" -exec sed 1d {} \; >> experimentB_processed.csv
head -1 tile_x10_y10_masks_filtered.csv > experimentA_processed.csv
find * -regextype posix-extended -regex "tile.+\.csv" -exec sed 1d {} \; >> experimentA_processed.csv
# delete rows without dataset id, etc, then:
# Bulk-annotate: attach the combined table to the experimentA project.
# --batch 1000 inserts rows in chunks; --report prints progress.
# NOTE(review): replace Project:1234 with the real experimentA project id.
omero metadata populate --report --batch 1000 --table-name experimentA_processed.csv --file experimentA_processed.csv Project:1234