This repository was archived by the owner on Feb 11, 2023. It is now read-only.

Commit f9b62fb

CIMA experiments (#45)
* add CIMA pairing
* prepare CIMA experiments
* add notebook CIMA scope
* add notebook w. scope compare
* fix for too large images
* fix evaluation
* fix fig export
* add STD measures
* add JSON results
* update readme & add ref.
* update shell experiments
* drop enlighten
* minor rename
1 parent 99b324f · commit f9b62fb

26 files changed: +2203 −78 lines

.travis.yml

Lines changed: 1 addition & 0 deletions
```diff
@@ -76,6 +76,7 @@ before_install:
   fi

 install:
+  - pip install "setuptools<46" -U  # v46 crashes openslide-python install
   - pip install -r requirements.txt
   - pip install -r ./tests/requirements.txt
   - pip --version ; pip list
```

README.md

Lines changed: 1 addition & 0 deletions
```diff
@@ -320,6 +320,7 @@ The project is using the standard [BSD license](http://opensource.org/licenses/B

 For complete references see [bibtex](docs/references.bib).
 1. Borovec, J., Munoz-Barrutia, A., & Kybic, J. (2018). **[Benchmarking of image registration methods for differently stained histological slides](https://www.researchgate.net/publication/325019076_Benchmarking_of_image_registration_methods_for_differently_stained_histological_slides)**. In IEEE International Conference on Image Processing (ICIP) (pp. 3368–3372), Athens. [DOI: 10.1109/ICIP.2018.8451040](https://doi.org/10.1109/ICIP.2018.8451040)
+2. Borovec, J. (2019). **BIRL: Benchmark on Image Registration methods with Landmark validation**. arXiv preprint [arXiv:1912.13452](https://arxiv.org/abs/1912.13452).

 ## Appendix - Useful information
```

appveyor.yml

Lines changed: 1 addition & 0 deletions
```diff
@@ -62,6 +62,7 @@ install:
   # the parent CMD process).
   - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
   - python -m pip install --upgrade pip
+  - pip install "setuptools<46" -U  # v46 crashes openslide-python install
   - pip install -r requirements.txt
   - pip install -r ./tests/requirements.txt
   - pip install tox
```

birl/benchmark.py

Lines changed: 6 additions & 0 deletions
```diff
@@ -455,6 +455,9 @@ def _perform_registration(self, df_row):
         row = self.__images_preprocessing(row)
         row[self.COL_TIME_PREPROC] = (time.time() - time_start) / 60.
         row = self._prepare_img_registration(row)
+        # if the pre-processing failed, return None
+        if not row:
+            return None

         # measure execution time
         time_start = time.time()
@@ -468,6 +471,9 @@ def _perform_registration(self, df_row):
         row = self.__remove_pproc_images(row)

         row = self._parse_regist_results(row)
+        # if the post-processing failed, return None
+        if not row:
+            return None
         row = self._clear_after_registration(row)

         if self.params.get('visual', False):
```
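The two guards above follow the same fail-fast pattern: any step of the registration pipeline may return None, and the runner then drops that case instead of crashing on a half-initialised row. A minimal, hypothetical sketch of the pattern (helper names invented here for illustration, not birl API):

```python
# Hypothetical, simplified pipeline showing the guard added above;
# prepare() stands in for a step such as _prepare_img_registration.
def prepare(row):
    return row if row.get('image') else None  # fail when the input is missing

def register(row):
    row['registered'] = True
    return row

def perform_registration(row):
    row = prepare(row)
    if not row:          # pre-processing failed -> skip this case
        return None
    return register(row)

print(perform_registration({'image': 'img.png'}))  # succeeds
print(perform_registration({}))                    # None -> the case is dropped
```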

birl/utilities/drawing.py

Lines changed: 10 additions & 6 deletions
```diff
@@ -324,11 +324,13 @@ def __init__(self, df, steps=5, fig=None, rect=None, fill_alpha=0.05, colors='ni
         for i, (idx, row) in enumerate(self.data.iterrows()):
             self.__draw_curve(idx, row, fill_alpha, color=colors[i], *args, **kwargs)

+        self._labels = []
         for ax in self.axes:
             for theta, label in zip(ax.get_xticks(), ax.get_xticklabels()):
                 self.__realign_polar_xtick(ax, theta, label)
+                self._labels.append(label)

-        self.ax.legend(loc='center left', bbox_to_anchor=(1.2, 0.7))
+        self._legend = self.ax.legend(loc='center left', bbox_to_anchor=(1.2, 0.7))

     @classmethod
     def __ax_set_invisible(self, ax):
@@ -490,7 +492,7 @@ def draw_matrix_user_ranking(df_stat, higher_better=False, fig=None, cmap='tab20
     ranking = compute_matrix_user_ranking(df_stat, higher_better)

     if fig is None:
-        fig, _ = plt.subplots(figsize=np.array(df_stat.as_matrix().shape[::-1]) * 0.35)
+        fig, _ = plt.subplots(figsize=np.array(df_stat.values.shape[::-1]) * 0.35)
     ax = fig.gca()
     arange = np.linspace(-0.5, len(df_stat) - 0.5, len(df_stat) + 1)
     norm = plt_colors.BoundaryNorm(arange, len(df_stat))
@@ -513,7 +515,7 @@ def draw_scatter_double_scale(df, colors='nipy_spectral',
                               figsize=None,
                               legend_style=None,
                               plot_style=None,
-                              x_spread=(0.3, 5)):
+                              x_spread=(0.4, 5)):
     """Draw a scatter with double scales on left and right

     :param DF df: dataframe
@@ -531,7 +533,7 @@ def draw_scatter_double_scale(df, colors='nipy_spectral',
     >>> df = pd.DataFrame(np.random.random((10, 3)), columns=['col1', 'col2', 'col3'])
     >>> fig, axs = draw_scatter_double_scale(df, ax_decs={'name': None}, xlabel='X')
     >>> axs  # doctest: +ELLIPSIS
-    (<...>, None)
+    {...}
     >>> # just the selected columns
     >>> fig, axs = draw_scatter_double_scale(df, ax_decs={'name1': ['col1', 'col2'],
     ...                                                   'name2': ['col3']})
@@ -602,5 +604,7 @@ def draw_scatter_double_scale(df, colors='nipy_spectral',
     # legend - https://matplotlib.org/3.1.1/gallery/text_labels_and_annotations/custom_legends.html
     if legend_style is None:
         legend_style = dict(loc='upper center', bbox_to_anchor=(1.25, 1.0), ncol=1)
-    ax1.legend(idx_names, **legend_style)
-    return fig, (ax1, ax2)
+    lgd = ax1.legend(idx_names, **legend_style)
+
+    extras = {'ax1': ax1, 'ax2': ax2, 'legend': lgd}
+    return fig, extras
```
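Keeping the legend handle (`self._legend`, and `'legend'` in the returned `extras`) is what enables the "fix fig export" part of this commit: matplotlib clips artists placed outside the axes on save unless they are passed explicitly. A short sketch of that matplotlib behaviour (plain matplotlib, not birl code):

```python
import matplotlib
matplotlib.use('Agg')  # headless backend, just for the sketch
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1], label='col1')
# legend anchored outside the axes, as in draw_scatter_double_scale
lgd = ax.legend(loc='upper center', bbox_to_anchor=(1.25, 1.0))

# without bbox_extra_artists the out-of-axes legend gets cut off in the file
fig.savefig('figure.png', bbox_extra_artists=(lgd,), bbox_inches='tight')
```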

birl/utilities/evaluate.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -219,7 +219,7 @@ def compute_matrix_user_ranking(df_stat, higher_better=False):
            [ 0., 2., 1.],
            [ 4., 4., 2.]])
     """
-    ranking = np.zeros(df_stat.as_matrix().shape)
+    ranking = np.zeros(df_stat.values.shape)
     nan = -np.inf if higher_better else np.inf
     for i, col in enumerate(df_stat.columns):
         vals = [v if not np.isnan(v) else nan for v in df_stat[col]]
```
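The `as_matrix()` replacement here (and in `drawing.py` above) tracks pandas deprecations: `DataFrame.as_matrix()` was deprecated and later removed in pandas 1.0, while `.values` exposes the same underlying NumPy array, so the change is behaviour-preserving. A quick check:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.arange(6).reshape(3, 2), columns=['a', 'b'])
assert df.values.shape == (3, 2)                 # what as_matrix().shape returned
assert np.array_equal(df.values, df.to_numpy())  # the modern equivalent call
```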

bm_ANHIR/Dockerfile

Lines changed: 1 addition & 1 deletion
```diff
@@ -47,7 +47,7 @@ ENV PATH="/home/evaluator/.local/bin:${PATH}"
 COPY --chown=evaluator:evaluator ./evaluate_submission.py /opt/evaluation/
 COPY --chown=evaluator:evaluator ./dataset_ANHIR/dataset_medium.csv /opt/evaluation/dataset.csv
 COPY --chown=evaluator:evaluator ./dataset_ANHIR/computer-performances_cmpgrid-71.json /opt/evaluation/computer-performances.json
-COPY --chown=evaluator:evaluator ./dataset_ANHIR/landmarks_user /opt/evaluation/lnds_provided
+COPY --chown=evaluator:evaluator dataset_ANHIR/landmarks_user_phase1 /opt/evaluation/lnds_provided
 COPY --chown=evaluator:evaluator ./dataset_ANHIR/landmarks_all /opt/evaluation/lnds_reference

 # Define execution
```

bm_ANHIR/evaluate_submission.py

Lines changed: 20 additions & 9 deletions
```diff
@@ -209,18 +209,25 @@ def parse_landmarks(idx_row):
         # 'reference landmarks': np.round(lnds_ref, 1).tolist(),
         # 'warped landmarks': np.round(lnds_warp, 1).tolist(),
         'matched-landmarks': match_lnds,
-        'Robustness': row.get(ImRegBenchmark.COL_ROBUSTNESS, 0),
-        'Norm-Time_minutes': row.get(COL_NORM_TIME, None),
+        'Robustness': np.round(row.get(ImRegBenchmark.COL_ROBUSTNESS, 0), 3),
+        'Norm-Time_minutes': np.round(row.get(COL_NORM_TIME, None), 5),
         'Status': row.get(ImRegBenchmark.COL_STATUS, None),
     }
+
+    def _round_val(row, col):
+        dec = 5 if col.startswith('rTRE') else 2
+        return np.round(row[col], dec)
+
     # copy all columns with Affine statistic
-    item.update({col.replace(' ', '-'): row[col] for col in row if 'affine' in col.lower()})
+    item.update({col.replace(' ', '-'): _round_val(row, col)
+                 for col in row if 'affine' in col.lower()})
     # copy all columns with rTRE, TRE and Overlap
     # item.update({col.replace(' (final)', '').replace(' ', '-'): row[col]
     #              for col in row if '(final)' in col})
-    item.update({col.replace(' (elastic)', '_elastic').replace(' ', '-'): row[col]
+    item.update({col.replace(' (elastic)', '_elastic').replace(' ', '-'): _round_val(row, col)
                  for col in row if 'TRE' in col})
-    return idx, item
+    # later on, JSON keys have to be str only
+    return str(idx), item


 def compute_scores(df_experiments, min_landmarks=1.):
@@ -271,6 +278,7 @@ def _compute_scores_general(df_experiments, df_expt_robust):
     # parse specific metrics
     scores = {
         'Average-Robustness': np.mean(df_experiments[ImRegBenchmark.COL_ROBUSTNESS]),
+        'STD-Robustness': np.std(df_experiments[ImRegBenchmark.COL_ROBUSTNESS]),
         'Median-Robustness': np.median(df_experiments[ImRegBenchmark.COL_ROBUSTNESS]),
         'Average-Rank-Median-rTRE': np.nan,
         'Average-Rank-Max-rTRE': np.nan,
@@ -280,15 +288,18 @@ def _compute_scores_general(df_experiments, df_expt_robust):
                       ('Max-rTRE', 'rTRE Max'),
                       ('Average-rTRE', 'rTRE Mean'),
                       ('Norm-Time', COL_NORM_TIME)]:
-        scores['Average-' + name] = np.nanmean(df_experiments[col])
-        scores['Average-' + name + '-Robust'] = np.nanmean(df_expt_robust[col])
-        scores['Median-' + name] = np.median(df_experiments[col])
-        scores['Median-' + name + '-Robust'] = np.median(df_expt_robust[col])
+        for df, suffix in [(df_experiments, ''), (df_expt_robust, '-Robust')]:
+            scores['Average-' + name + suffix] = np.nanmean(df[col])
+            scores['STD-' + name + suffix] = np.nanstd(df[col])
+            scores['Median-' + name + suffix] = np.median(df[col])
     return scores


 def _compute_scores_state_tissue(df_experiments):
     scores = {}
+    if ImRegBenchmark.COL_STATUS not in df_experiments.columns:
+        logging.warning('experiments (table) is missing "%s" column', ImRegBenchmark.COL_STATUS)
+        df_experiments[ImRegBenchmark.COL_STATUS] = 'any'
     # filter all statuses in the experiments
     statuses = df_experiments[ImRegBenchmark.COL_STATUS].unique()
     # parse metrics according to TEST and TRAIN case
```
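The switch to `str(idx)` matters because the parsed items are later dumped to JSON: the standard `json` module coerces plain int keys to strings but rejects NumPy integer keys, which is what iterating a pandas index typically yields. A small sketch of both the key coercion and the rounding (values invented for illustration):

```python
import json
import numpy as np

item = {'Robustness': np.round(0.98765, 3)}  # rounding keeps the JSON compact

idx = np.int64(7)  # e.g. a row index coming from pandas
try:
    json.dumps({idx: item})
except TypeError:
    print('NumPy integer keys are rejected by json.dumps')

print(json.dumps({str(idx): item}))  # {"7": {"Robustness": 0.988}}
```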

bm_CIMA/README.md

Lines changed: 32 additions & 0 deletions
```diff
@@ -0,0 +1,32 @@
+# Experimentation on the CIMA dataset
+
+This section is strictly limited to image registration experiments on the [CIMA dataset](http://cmp.felk.cvut.cz/~borovji3/?page=dataset).
+
+## Structure
+
+- **Datasets**: the particular dataset setting is described by the image/landmark pairing - CSV tables called `dataset_CIMA_<scope>.csv`
+- **Script**: the execution script is `run-SOTA-experiments.sh` and it performs all experiments
+- **Results**: the experimental results are exported and zipped per particular dataset scope; the archives are `results_size-<scope>.zip`
+
+
+## Usage
+
+**Reproduce statistics**
+
+You need to unzip the particular results for each dataset scale into a separate folder (e.g. with the same name).
+Then you need to run the [scope notebook](../notebooks/CIMA_SOTA-results_scope.ipynb) to show results on a particular dataset scope, or the [comparing notebook](../notebooks/CIMA_SOTA-results_comparing.ipynb) to compare some statistics over two scopes.
+Note that when using the attached JSON results you do not need to run the cells related to parsing the raw benchmark results.
+
+**Add your own method to the statistics**
+
+You need to run your benchmark on the particular dataset scope; the image pairings are:
+- [10k scope](dataset_CIMA_10k.csv)
+- [full scope](dataset_CIMA_full.csv)
+
+Then you can parse just the new results with the [evaluation script](../bm_ANHIR/evaluate_submission.py) or execute the parsing cells at the beginning of the [scope notebook](../notebooks/CIMA_SOTA-results_scope.ipynb).
+
+
+## References
+
+For complete references see [bibtex](../docs/references.bib).
+1. Borovec, J. (2019). **BIRL: Benchmark on Image Registration methods with Landmark validation**. arXiv preprint [arXiv:1912.13452](https://arxiv.org/abs/1912.13452).
```
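As a starting point for the "add your own method" path in the new README, a sketch of inspecting one dataset scope before running a benchmark on it (the path assumes the repository root; column names are only printed, not assumed):

```python
import pandas as pd

# image/landmark pairing for the 10k scope, shipped in this folder
pairing = pd.read_csv('bm_CIMA/dataset_CIMA_10k.csv')
print(len(pairing), 'image pairs in the 10k scope')
print(pairing.columns.tolist())
```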
