Skip to content

Commit a9b0b91

Browse files
author
Guillaume Le Goc
committed
fixes from joss aeic
1 parent 23a2d35 commit a9b0b91

File tree

2 files changed

+31
-29
lines changed

2 files changed

+31
-29
lines changed

paper.bib

Lines changed: 22 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,14 @@
1-
@online{chiaruttini2024,
2-
title = {{{ABBA}}, a Novel Tool for Whole-Brain Mapping, Reveals Brain-Wide Differences in Immediate Early Genes Induction Following Learning},
3-
author = {Chiaruttini, Nicolas and Castoldi, Carlo and Requie, Linda Maria and Camarena-Delgado, Carmen and Dal Bianco, Beatrice and Gräff, Johannes and Seitz, Arne and Silva, Bianca A.},
4-
date = {2024-09-06},
5-
doi = {10.1101/2024.09.06.611625},
6-
url = {http://biorxiv.org/lookup/doi/10.1101/2024.09.06.611625},
7-
urldate = {2024-09-12},
8-
abstract = {Abstract Unbiased characterization of whole-brain cytoarchitecture represents an invaluable tool for understanding brain function. For this, precise mapping of histological markers from 2D sections onto 3D brain atlases is pivotal. Here, we present two novel software tools facilitating this process: Aligning Big Brains and Atlases (ABBA), designed to streamline the precise and efficient registration of 2D sections to 3D reference atlases, and BraiAn, an integrated suite for multi-marker automated segmentation, whole-brain statistical analysis, and data visualisation. Combining these tools, we performed a comprehensive comparative study of the whole-brain expression of three of the most widely used immediate early genes (IEGs). Thanks to their neural activity-dependent expression, IEGs have been used for decades as a proxy of neural activity to generate unbiased mapping of activity following behaviour, but their respective induction in response to neuronal activation across the entire brain remains unclear. To address this question, we systematically compared the brain-wide expression cFos, Arc and NPAS4, three abundantly used IEGs, across three different behavioural conditions related to memory. Our results highlight major differences in both their distribution and induction patterns, indicating that they do not represent equivalent markers across brain areas or activity states, but can provide instead complementary information.},
9-
langid = {english},
10-
pubstate = {prepublished}
1+
@article{chiaruttini2024,
2+
author = {Chiaruttini, Nicolas and Castoldi, Carlo and Requie, Linda Maria and Camarena-Delgado, Carmen and dal Bianco, Beatrice and Gr{\"a}ff, Johannes and Seitz, Arne and Silva, Bianca A.},
3+
title = {ABBA, a novel tool for whole-brain mapping, reveals brain-wide differences in immediate early genes induction following learning},
4+
elocation-id = {2024.09.06.611625},
5+
year = {2024},
6+
doi = {10.1101/2024.09.06.611625},
7+
publisher = {Cold Spring Harbor Laboratory},
8+
abstract = {Unbiased characterization of whole-brain cytoarchitecture represents an invaluable tool for understanding brain function. For this, precise mapping of histological markers from 2D sections onto 3D brain atlases is pivotal. Here, we present two novel software tools facilitating this process: Aligning Big Brains and Atlases (ABBA), designed to streamline the precise and efficient registration of 2D sections to 3D reference atlases, and BraiAn, an integrated suite for multi-marker automated segmentation, whole-brain statistical analysis, and data visualisation. Combining these tools, we performed a comprehensive comparative study of the whole-brain expression of three of the most widely used immediate early genes (IEGs). Thanks to their neural activity-dependent expression, IEGs have been used for decades as a proxy of neural activity to generate unbiased mapping of activity following behaviour, but their respective induction in response to neuronal activation across the entire brain remains unclear. To address this question, we systematically compared the brain-wide expression cFos, Arc and NPAS4, three abundantly used IEGs, across three different behavioural conditions related to memory. Our results highlight major differences in both their distribution and induction patterns, indicating that they do not represent equivalent markers across brain areas or activity states, but can provide instead complementary information.Competing Interest StatementThe authors have declared no competing interest.},
9+
URL = {https://www.biorxiv.org/content/early/2024/09/06/2024.09.06.611625},
10+
eprint = {https://www.biorxiv.org/content/early/2024/09/06/2024.09.06.611625.full.pdf},
11+
journal = {bioRxiv}
1112
}
1213

1314
@article{bankhead2017,
@@ -259,16 +260,17 @@ @incollection{schmidt2018
259260
langid = {english},
260261
}
261262

262-
@online{goldsborough2024a,
263-
title = {A Novel Channel Invariant Architecture for the Segmentation of Cells and Nuclei in Multiplexed Images Using {{InstanSeg}}},
264-
author = {Goldsborough, Thibaut and O’Callaghan, Alan and Inglis, Fiona and Leplat, Léo and Filby, Andrew and Bilen, Hakan and Bankhead, Peter},
265-
date = {2024-09-08},
266-
doi = {10.1101/2024.09.04.611150},
267-
url = {http://biorxiv.org/lookup/doi/10.1101/2024.09.04.611150},
268-
urldate = {2024-09-12},
269-
abstract = {The quantitative analysis of bioimaging data increasingly depends on the accurate segmentation of cells and nuclei, a significant challenge for the analysis of high-plex imaging data. Current deep learning-based approaches to segment cells in multiplexed images require reducing the input to a small and fixed number of input channels, discarding imaging information in the process. We present ChannelNet, a novel deep learning architecture for generating threechannel representations of multiplexed images irrespective of the number or ordering of imaged biomarkers. When combined with InstanSeg, ChannelNet sets a new benchmark for the segmentation of cells and nuclei on public multiplexed imaging datasets. We provide an open implementation of our method and integrate it in open source software. Our code and models are available on https://github.com/instanseg/instanseg.},
270-
langid = {english},
271-
pubstate = {prepublished},
263+
@article{goldsborough2024a,
264+
author = {Goldsborough, Thibaut and O{\textquoteright}Callaghan, Alan and Inglis, Fiona and Leplat, L{\'e}o and Filby, Andrew and Bilen, Hakan and Bankhead, Peter},
265+
title = {A novel channel invariant architecture for the segmentation of cells and nuclei in multiplexed images using InstanSeg},
266+
elocation-id = {2024.09.04.611150},
267+
year = {2024},
268+
doi = {10.1101/2024.09.04.611150},
269+
publisher = {Cold Spring Harbor Laboratory},
270+
abstract = {The quantitative analysis of bioimaging data increasingly depends on the accurate segmentation of cells and nuclei, a significant challenge for the analysis of high-plex imaging data. Current deep learning-based approaches to segment cells in multiplexed images require reducing the input to a small and fixed number of input channels, discarding imaging information in the process. We present ChannelNet, a novel deep learning architecture for generating three-channel representations of multiplexed images irrespective of the number or ordering of imaged biomarkers. When combined with InstanSeg, ChannelNet sets a new benchmark for the segmentation of cells and nuclei on public multiplexed imaging datasets. We provide an open implementation of our method and integrate it in open source software. Our code and models are available on https://github.com/instanseg/instanseg.Competing Interest StatementThe authors have declared no competing interest.},
271+
URL = {https://www.biorxiv.org/content/early/2024/09/08/2024.09.04.611150},
272+
eprint = {https://www.biorxiv.org/content/early/2024/09/08/2024.09.04.611150.full.pdf},
273+
journal = {bioRxiv}
272274
}
273275

274276
@article{stringer2021,

paper.md

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -33,27 +33,27 @@ bibliography: paper.bib
3333
# Summary
3434
Fluorescent labeling techniques, including immunohistochemistry and endogenously fluorescent proteins, are key assets in neuroscience. Combined with genetic toolboxes, they enable the anatomical identification of specific neurons and neuronal processes, shedding light on neural network organization and linking behavior to brain structures. Advances in imaging techniques and numerical tools have enabled the creation of volumetric, annotated, whole-brain atlases of various animal models [@wang2020; @kleven2023; @kunst2019; @lazcano2021], facilitating brain-wide mapping of labeled elements to a reference three-dimensional space. In this framework, a common task is to count, in the reference brain regions, objects of interest (be it whole cells, nuclei, axons, synaptic puncta...) detected in 2D histological slices imaged with fluorescence microscopy.
3535

36-
Multiple software solutions exist to perform image analysis for objects detection, and tools to register 2D slices to 3D reference atlases are also available. The `cuisto` package aims at bridging these pieces of software together to provide a streamlined process, from raw images to quantification figures. It harnesses the output of the bioimage analysis software QuPath [@bankhead2017] used together with the registration toolbox ABBA [@chiaruttini2024]. Designed with users who have minimal programming knowledge in mind, it is configurable and intended to be atlas- and staining-agnostic to cover various use-cases, and provides utility scripts that can be used throughout the analysis pipeline. Furthermore, an extensive documentation is provided, and includes 1) the installation of the various pieces of software used upstream of the `cuisto` package, 2) ABBA and QuPath tutorials, 3) in-depth explanation of the data formatting requirements and 4) hands-on examples with Python scripts and Jupyter notebooks.
36+
Multiple software solutions exist to perform image analysis for object detection, and tools to register 2D slices to 3D reference atlases are also available. The `cuisto` package aims at bridging these pieces of software together to provide a streamlined process, from raw images to quantification figures. It harnesses the output of the bioimage analysis software QuPath [@bankhead2017] used together with the registration toolbox ABBA [@chiaruttini2024]. Designed with users who have minimal programming knowledge in mind, it is configurable and intended to be atlas- and staining-agnostic to cover various use-cases, and provides utility scripts that can be used throughout the analysis pipeline. Furthermore, extensive documentation is provided, and includes 1) the installation of the various pieces of software used upstream of the `cuisto` package, 2) ABBA and QuPath tutorials, 3) in-depth explanation of the data formatting requirements and 4) hands-on examples with Python scripts and Jupyter notebooks.
3737

3838
# Statement of need
3939
As the task of counting objects in brain regions from fluorescent microscopy images is widespread, several toolboxes have been developed for brain-wide object quantification such as the QUINT workflow [@yates2019]. The pipeline relies on QuickNII and VisuAlign [@puchades2019] for the registration and Ilastik [@berg2019] for the segmentation. While effective for specific tasks, such as counting punctal objects, its architecture offers limited flexibility, making it hard to take shortcuts during the workflow, and it is not interoperable with other computational neuroanatomical tools, notably reference atlases provided by BrainGlobe [@claudi2020]. The latter does provide numerous tools to perform brain-wide cell counting [@tyson2021; @tyson2022] but is primarily designed for native 3D data (such as obtained from light sheet imaging of optically cleared tissue). This approach is however not readily accessible to all laboratories and may show variable efficacy depending on the fluorophore used, the intended resolution, or the targeted brain structures.
4040

41-
The QuPath software provides a full-featured, user-friendly interface to perform image quantification while being extensible and scriptable. It is being actively developed and is supported by a vivid community (more than 4k topics tagged with "qupath" on the [image.sc forum](https://forum.image.sc/tag/qupath) and more than 4k citations). This software supports a variety of segmentation strategies, from basic thresholding to pixel classification and advanced deep learning methods such as CellPose [@stringer2021], StarDist [@schmidt2018] and InstanSeg [@goldsborough2024a]. The Fiji [@schindelin2012] plugin Aligning Big Brain and Atlases (ABBA) allows for semi-automated registration of whole-brain 2D sections to a 3D atlas in an intuitive and interactive graphical user interface using native full-resolution multichannel images, providing both automatic in-plane registration through elastix [@klein2010] and manual adjustment with BigWarp [@bogovic2016]. It also supports the deep-learning-based automatic registration tool DeepSlice [@carey2023] and can be interfaced with BrainGlobe atlases. Furthermore, it integrates seamlessly with QuPath, to both import images and export back the registration results as annotations in QuPath for further quantification.
41+
The QuPath software provides a full-featured, user-friendly interface to perform image quantification while being extensible and scriptable. It is being actively developed and is supported by a vibrant community (more than 4k topics tagged with "qupath" on the [image.sc forum](https://forum.image.sc/tag/qupath) and more than 4k citations). This software supports a variety of segmentation strategies, from basic thresholding to pixel classification and advanced deep learning methods such as CellPose [@stringer2021], StarDist [@schmidt2018] and InstanSeg [@goldsborough2024a]. The Fiji [@schindelin2012] plugin Aligning Big Brain and Atlases (ABBA) allows for semi-automated registration of whole-brain 2D sections to a 3D atlas in an intuitive and interactive graphical user interface using native full-resolution multichannel images, providing both automatic in-plane registration through elastix [@klein2010] and manual adjustment with BigWarp [@bogovic2016]. It also supports the deep-learning-based automatic registration tool DeepSlice [@carey2023] and can be interfaced with BrainGlobe atlases. Furthermore, it integrates seamlessly with QuPath, to both import images and export back the registration results as annotations in QuPath for further quantification.
4242

43-
Yet, to our knowledge, no streamlined pipeline existed to bridge the image processing and registration with the region-based quantification and data-visualization. The present work is two-fold:
43+
Yet, to our knowledge, no streamlined pipeline existed to bridge the image processing and registration with the region-based quantification and data visualization. The present work is two-fold:
4444

4545
1. Provide step-by-step guides to install and use QuPath and ABBA to quantify and format data for counting objects in a reference brain, providing automation scripts where necessary.
4646
2. Provide a downstream Python package to use the raw counting data exported from QuPath to summarize and display derived metrics with minimal coding knowledge, while being modular enough to fit more advanced users' needs.
4747

48-
Specifically, guides are provided to align 2D histological slices to volumetric reference atlases with ABBA and detect objects of interest in QuPath. `cuisto` includes an image processing module to segment objects from the prediction maps generated with QuPath’s pixel classifier, supporting punctal objects, polygons and fibers. Guides further cover how to quantify those in the registered atlas regions (either object counting for punctual objects or cumulated length for fibers-like objects), format the data to be used by `cuisto`, and finally export the results as tabular data (or json files for fibers-like objects).
48+
Specifically, guides are provided to align 2D histological slices to volumetric reference atlases with ABBA and detect objects of interest in QuPath. `cuisto` includes an image processing module to segment objects from the prediction maps generated with QuPath’s pixel classifier, supporting punctal objects, polygons, and fibers. Guides further cover how to quantify those in the registered atlas regions (either object counting for punctual objects or cumulative length for fiber-like objects), format the data to be used by `cuisto`, and finally export the results as tabular data (or json files for fiber-like objects).
4949

50-
Subsequently, `cuisto` is used to collect the data from different subjects, pool them based on the atlas regions names and derive quantifying metrics from the raw count or cumulated length. Those metrics include, for each atlas region:
50+
Subsequently, `cuisto` is used to collect the data from different subjects, pool them based on the atlas region names, and derive quantifying metrics from the raw count or cumulative length. Those metrics include, for each atlas region:
5151

5252
- the raw measurement,
53-
- the areal density, e.g. the raw measurement divided by the region area,
54-
- the relative density, e.g. the areal density as a fraction of the total density.
53+
- the areal density, i.e. the raw measurement divided by the region area,
54+
- the relative density, i.e. the areal density as a fraction of the total density.
5555

56-
Furthermore, `cuisto` leverages the atlas coordinates of each object of interest to compute and display spatial distributions in the reference atlas space. 2D projection heatmaps are also generated and overlaid on the atlas regions contours.
56+
Furthermore, `cuisto` leverages the atlas coordinates of each object of interest to compute and display spatial distributions in the reference atlas space. 2D projection heatmaps are also generated and overlaid on the atlas region contours.
5757

5858
`cuisto` processing and display are configured with human-readable configuration files that support up to two hemispheres and any number of detection channels to be able to compare different biological markers. Ultimately, this package aims to be versatile: while it was designed around BrainGlobe atlases, those are not mandatory and the user can use it with custom annotated regions instead — as long as the format requirements are met.
5959

@@ -62,6 +62,6 @@ It is worth mentioning that BraiAn [@chiaruttini2024], a toolbox following a sim
6262
The documentation and the examples provide toy data, derived from datasets acquired on mice models. All procedures were approved by the French Ethical Committee (“Comité d’éthique en Expérimentation Animale”, CEEA #59, authorization 2020-022410231878) and conducted in accordance with EU Directive 2010/63/EU.
6363

6464
# Acknowledgements
65-
We would like to thank the original author of QuPath, Peter Bankhead and all contributors. We also thank the original author of ABBA, Nicolas Chiaruttini, especially for his support on ABBA-Python. We thank all contributors of the BrainGlobe Initiative. We're grateful to member of the lab Patricia del Cerro de Pablo for her early feedbacks. This work has received funding from the European Research Council (ERC) under the European Union's Horizon Europe research and innovation program (grant agreement No 101089318), the Fondation pour la Recherche Médicale (FRM EQU202203014620 and ECO202206015594), the CNRS and the University Paris-Saclay.
65+
We would like to thank the original author of QuPath, Peter Bankhead, and all contributors. We also thank the original author of ABBA, Nicolas Chiaruttini, especially for his support on ABBA-Python. We thank all contributors of the BrainGlobe Initiative. We're grateful to lab member Patricia del Cerro de Pablo for her early feedback. This work has received funding from the European Research Council (ERC) under the European Union's Horizon Europe research and innovation program (grant agreement No 101089318), the Fondation pour la Recherche Médicale (FRM EQU202203014620 and ECO202206015594), the CNRS and the University Paris-Saclay.
6666

6767
# References

0 commit comments

Comments
 (0)