-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpapers.bib
More file actions
111 lines (105 loc) · 9.92 KB
/
papers.bib
File metadata and controls
111 lines (105 loc) · 9.92 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
---
% TODO (LouisTier): Update bibliography in _bibliography/papers.bib
% - `abbr`: Adds an abbreviation to the left of the entry. You can add links to these by creating a venue.yaml-file in the \_data folder and adding entries that match.
% - `abstract`: Adds an "Abs" button that expands a hidden text field when clicked to show the abstract text
% - `altmetric`: Adds an [Altmetric](https://www.altmetric.com/) badge (Note: if DOI is provided just use `true`, otherwise only add the altmetric identifier here - the link is generated automatically)
% - `annotation`: Adds a popover info message to the end of the author list that can potentially be used to clarify superscripts. HTML is allowed.
% - `arxiv`: Adds a link to the Arxiv website (Note: only add the arxiv identifier here - the link is generated automatically)
% - `bibtex_show`: Adds a "Bib" button that expands a hidden text field with the full bibliography entry
% - `blog`: Adds a "Blog" button redirecting to the specified link
% - `code`: Adds a "Code" button redirecting to the specified link
% - `dimensions`: Adds a [Dimensions](https://www.dimensions.ai/) badge (Note: if DOI or PMID is provided just use `true`, otherwise only add the Dimensions' identifier here - the link is generated automatically)
% - `html`: Inserts an "HTML" button redirecting to the user-specified link
% - `pdf`: Adds a "PDF" button redirecting to a specified file (if a full link is not specified, the file will be assumed to be placed in the /assets/pdf/ directory)
% - `poster`: Adds a "Poster" button redirecting to a specified file (if a full link is not specified, the file will be assumed to be placed in the /assets/pdf/ directory)
% - `slides`: Adds a "Slides" button redirecting to a specified file (if a full link is not specified, the file will be assumed to be placed in the /assets/pdf/ directory)
% - `supp`: Adds a "Supp" button to a specified file (if a full link is not specified, the file will be assumed to be placed in the /assets/pdf/ directory)
% - `video`: Adds a "Video" button redirecting to the specified link
% - `website`: Adds a "Website" button redirecting to the specified link
---
@misc{berthier2025torchsom,
  bibtex_show   = {true},
  title         = {{torchsom}: The Reference {PyTorch} Library for Self-Organizing Maps},
  author        = {Berthier, Louis and Shokry, Ahmed and Moreaud, Maxime and Ramelet, Guillaume and Moulines, Eric},
  year          = {2025},
  eprint        = {2510.11147},
  archivePrefix = {arXiv},
  primaryClass  = {stat.ML},
  abstract      = {This paper introduces torchsom, an open-source Python library that provides a reference implementation of the Self-Organizing Map (SOM) in PyTorch. This package offers three main features: (i) dimensionality reduction, (ii) clustering, and (iii) friendly data visualization. It relies on a PyTorch backend, enabling (i) fast and efficient training of SOMs through GPU acceleration, and (ii) easy and scalable integrations with PyTorch ecosystem. Moreover, torchsom follows the scikit-learn API for ease of use and extensibility. The library is released under the Apache 2.0 license with 90% test coverage, and its source code and documentation are available at https://github.com/michelin/TorchSOM.},
  note          = {Preprint submitted to Journal of Machine Learning Research},
  url           = {https://arxiv.org/abs/2510.11147},
  abbr          = {arXiv},
  arxiv         = {2510.11147},
  preview       = {torchsom_umap.jpeg},
}
% doi={10.1002/andp.19053220607},
% google_scholar_id={cFVpOmcAAAAJ}
@software{berthier2025torchsom_software,
  bibtex_show = {true},
  author      = {Berthier, Louis},
  title       = {{torchsom}: The Reference {PyTorch} Library for Self-Organizing Maps},
  year        = {2025},
  version     = {1.1.1},
  url         = {https://github.com/michelin/TorchSOM},
  abbr        = {Software},
  code        = {https://github.com/michelin/TorchSOM},
  website     = {https://opensource.michelin.io/TorchSOM/},
  preview     = {torchsom_logo.png},
}
% note = {Documentation available at \url{https://opensource.michelin.io/TorchSOM/}},
@article{berthier2025knowledge_discovery_rubber_mixing,
  bibtex_show = {true},
  title       = {Knowledge Discovery in Large-Scale Batch Processes through Explainable Boosted Models and Uncertainty Quantification: Application to Rubber Mixing},
  author      = {Berthier, Louis and Shokry, Ahmed and Moulines, Eric and Ramelet, Guillaume and Desroziers, Sylvain},
  journal     = {Systems and Control Transactions},
  volume      = {4},
  pages       = {1518--1523},
  year        = {2025},
  doi         = {10.69997/sct.183525},
  url         = {https://doi.org/10.69997/sct.183525},
  abstract    = {Rubber mixing (RM) is a vital batch process producing high-quality composites, which serve as input material for manufacturing different types of final products, such as tires. Due to its complexity, this process faces two main challenges regarding the final quality: i) lack of online measurement and ii) limited comprehension of the influence of the different factors involved in the process. While data-driven and machine learning (ML) based soft-sensing methods have been widely applied to address the first challenge, the second challenge, to the best of the author's knowledge, has not yet been addressed in the rubber industry. This work presents a data-driven method for extracting knowledge and providing explainability in the quality prediction in RM processes. The method centers on an XGBoost model while leveraging high-dimensional data collected over extended time periods from one of Michelin's complex mixing processes. First, a recursive feature elimination-based procedure is used for selecting relevant features, which reduces the number of input features used for building the ML model by 82% while improving its predictive performance by 17%. Secondly, SHapley Additive exPlanations (SHAP) techniques are employed to explain the ML model's predictions through global and local analyses of feature interactions. The selected quality-related variables can be leveraged to improve process control and supervision. Finally, an uncertainty quantification (UQ) module, based on Split Conformal Prediction (SCP), is combined with the ML model, providing confidence intervals with 90% coverage and empirically verified theoretical guarantees. This module ensures prediction reliability and robustness in real applications.},
  abbr        = {Systems & Control T.},
  note        = {Published June 27, 2025; archive record: LAPSE:2025.0396},
}
@article{gardy2025fast_ripples,
  bibtex_show = {true},
  title       = {Detecting fast-ripples on both micro- and macro-electrodes in epilepsy: A wavelet-based {CNN} detector},
  author      = {Gardy, Ludovic and Curot, Jonathan and Valton, Luc and Berthier, Louis and Barbeau, Emmanuel J. and Hurter, Christophe},
  journal     = {Journal of Neuroscience Methods},
  volume      = {415},
  pages       = {110350},
  year        = {2025},
  publisher   = {Elsevier},
  doi         = {10.1016/j.jneumeth.2024.110350},
  abbr        = {J. Neurosci. Methods},
  abstract    = {Background: Fast-ripples (FR) are short (~10 ms) high-frequency oscillations (HFO) between 200 and 600 Hz that are helpful in epilepsy to identify the epileptogenic zone. Our aim is to propose a new method to detect FR that had to be efficient for intracerebral EEG (iEEG) recorded from both usual clinical macro-contacts (millimeter scale) and microwires (micrometer scale). New Method: Step 1 of the detection method is based on a convolutional neural network (CNN) trained using a large database of > 11,000 FR recorded from the iEEG of 38 patients with epilepsy from both macro-contacts and microwires. The FR and non-FR events were fed to the CNN as normalized time-frequency maps. Step 2 is based on feature-based control techniques in order to reject false positives. In step 3, the human is reinstated in the decision-making process for final validation using a graphical user interface. Results: WALFRID achieved high performance on the realistically simulated data with sensitivity up to 99.95 % and precision up to 96.51 %. The detector was able to adapt to both macro and micro-EEG recordings. The real data was used without any pre-processing step such as artefact rejection. The precision of the automatic detection was of 57.5. Step 3 helped eliminating remaining false positives in a few minutes per subject. Comparison with Existing Methods: WALFRID performed as well or better than 6 other existing methods. Conclusion: Since WALFRID was created to mimic the work-up of the neurologist, clinicians can easily use, understand, interpret and, if necessary, correct the output.},
  preview     = {neuroscience.png},
}
% abstract={This paper presents a convolutional neural network (CNN)-based method for detecting fast ripples, a biomarker of epileptogenic zones, from both micro- and macro-electrode recordings. The approach combines a Morlet-based continuous wavelet transform with a CNN architecture to enable robust and interpretable oscillation detection across scales.},
@inproceedings{magnier2023_2dsbg,
  bibtex_show  = {true},
  title        = {{2DSBG}: A {2D} Semi {Bi-Gaussian} Filter Adapted for Adjacent and Multi-Scale Line Feature Detection},
  author       = {Magnier, Baptiste and Shokouh, Ghulam Sakhi and Berthier, Louis and Pie, Marcel and Ruggiero, Adrien},
  booktitle    = {ICASSP 2023--2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages        = {1--5},
  year         = {2023},
  organization = {IEEE},
  doi          = {10.1109/ICASSP49357.2023.10095570},
  abbr         = {ICASSP},
  abstract     = {Existing filtering techniques fail to precisely detect adjacent line features in multi-scale applications. In this paper, a new filter composed of a bi-Gaussian and a semi-Gaussian kernel is proposed, capable of highlighting complex linear structures such as ridges and valleys of different widths, with noise robustness. Experiments have been performed on a set of both synthetic and real images containing adjacent line features. The obtained results show the performance of the new technique in comparison to the main existing filtering methods.},
  preview      = {icassp.png},
}