Skip to content

Commit f6b6d40

Browse files
authored
Merge pull request #9 from TimoK93/clearmot_and_hota
Clearmot and hota
2 parents d2e51f8 + e867803 commit f6b6d40

File tree

26 files changed

+1700
-117
lines changed

26 files changed

+1700
-117
lines changed

.github/workflows/pylint.yml

+3-3
Original file line numberDiff line numberDiff line change
@@ -7,11 +7,11 @@ jobs:
77
runs-on: ubuntu-latest
88
strategy:
99
matrix:
10-
python-version: ["3.8", "3.9", "3.10"]
10+
python-version: ["3.9", "3.10", "3.11"]
1111
steps:
12-
- uses: actions/checkout@v3
12+
- uses: actions/checkout@v4
1313
- name: Set up Python ${{ matrix.python-version }}
14-
uses: actions/setup-python@v3
14+
uses: actions/setup-python@v4
1515
with:
1616
python-version: ${{ matrix.python-version }}
1717
- name: Install dependencies

.github/workflows/python-package.yml

+3-3
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ name: Python package
55

66
on:
77
push:
8-
branches: [ "main" ]
8+
branches: [ "main", "clearmot_and_hota" ]
99
pull_request:
1010
branches: [ "main" ]
1111

@@ -19,9 +19,9 @@ jobs:
1919
python-version: ["3.9", "3.10", "3.11"]
2020

2121
steps:
22-
- uses: actions/checkout@v3
22+
- uses: actions/checkout@v4
2323
- name: Set up Python ${{ matrix.python-version }}
24-
uses: actions/setup-python@v3
24+
uses: actions/setup-python@v4
2525
with:
2626
python-version: ${{ matrix.python-version }}
2727
- name: Install dependencies

README.md

+191-49
Large diffs are not rendered by default.

ctc_metrics/metrics/__init__.py

+10-3
Original file line numberDiff line numberDiff line change
@@ -6,13 +6,20 @@
66
from ctc_metrics.metrics.technical.seg import seg
77
from ctc_metrics.metrics.technical.tra import tra
88
from ctc_metrics.metrics.technical.det import det
9+
from ctc_metrics.metrics.clearmot.mota import mota
10+
from ctc_metrics.metrics.hota.hota import hota
11+
from ctc_metrics.metrics.hota.chota import chota
12+
from ctc_metrics.metrics.identity_metrics.idf1 import idf1
13+
from ctc_metrics.metrics.others.mt_ml import mtml
14+
from ctc_metrics.metrics.others.faf import faf
15+
916
from ctc_metrics.metrics.technical.op_ctb import op_ctb
1017
from ctc_metrics.metrics.technical.op_csb import op_csb
1118
from ctc_metrics.metrics.biological.bio import bio
1219
from ctc_metrics.metrics.biological.op_clb import op_clb
1320
from ctc_metrics.metrics.technical.lnk import lnk
1421

1522
ALL_METRICS = [
16-
"Valid", "BC", "CT", "CCA", "TF", "SEG", "TRA", "DET", "LNK", "OP_CTB",
17-
"OP_CSB", "BIO", "OP_CLB"
18-
]
23+
"Valid", "BC", "CT", "CCA", "TF", "SEG", "TRA", "DET", "MOTA", "HOTA",
24+
"CHOTA", "IDF1", "MTML", "FAF", "LNK", "OP_CTB", "OP_CSB", "BIO", "OP_CLB"
25+
]
ctc_metrics/metrics/biological/bc.py

+35-15
Original file line numberDiff line numberDiff line change
@@ -8,10 +8,14 @@ def get_ids_that_ends_with_split(
88
Extracts the ids of tracks that end with a cell split.
99
1010
Args:
11-
tracks: The tracks to check.
11+
tracks: The tracks to check. A numpy nd array with columns:
12+
- label
13+
- birth frame
14+
- end frame
15+
- parent
1216
1317
Returns:
14-
The ids of tracks that end with a cell split.
18+
The ids of tracks that end with a cell split stored in a numpy.ndarray.
1519
"""
1620
parents, counts = np.unique(tracks[:, 3], return_counts=True)
1721
counts = counts[parents > 0]
@@ -43,8 +47,8 @@ def calculate_f1_score(
4347

4448

4549
def is_matching(
46-
comp: int,
47-
ref: int,
50+
id_comp: int,
51+
id_ref: int,
4852
mapped_ref: list,
4953
mapped_comp: list,
5054
ref_children: np.ndarray,
@@ -56,14 +60,14 @@ def is_matching(
5660
Checks if the reference and the computed track match.
5761
5862
Args:
59-
comp: The computed track id.
60-
ref: The reference track id.
63+
id_comp: The computed track id.
64+
id_ref: The reference track id.
6165
mapped_ref: The matched labels of the ground truth masks.
6266
mapped_comp: The matched labels of the result masks.
6367
ref_children: The children ids of the reference track.
6468
comp_children: The children ids of the computed track.
65-
tr: The reference track end.
66-
tc: The computed track end.
69+
tr: The frame of the reference track end.
70+
tc: The frame of the computed track end.
6771
6872
Returns:
6973
True if the reference and the computed track match, False otherwise.
@@ -74,10 +78,10 @@ def is_matching(
7478
# Compare parents
7579
t1, t2 = min(tr, tc), max(tr, tc)
7680
mr, mc = mapped_ref[t1], mapped_comp[t1]
77-
if np.sum(mc == comp) < 1 or np.sum(mr == ref) != 1:
81+
if np.sum(mc == id_comp) < 1 or np.sum(mr == id_ref) != 1:
7882
return False
79-
ind = np.argwhere(mr == ref).squeeze()
80-
if mc[ind] != comp:
83+
ind = np.argwhere(mr == id_ref).squeeze()
84+
if mc[ind] != id_comp:
8185
return False
8286
# Compare children
8387
mr, mc = np.asarray(mapped_ref[t2 + 1]), np.asarray(mapped_comp[t2 + 1])
@@ -101,10 +105,26 @@ def bc(
101105
- Vladimir Ulman et al., Nature methods 2017
102106
103107
Args:
104-
comp_tracks: The result tracks.
105-
ref_tracks: The ground truth tracks.
106-
mapped_ref: The matched labels of the ground truth masks.
107-
mapped_comp: The matched labels of the result masks.
108+
comp_tracks: The result tracks. A (n,4) numpy ndarray with columns:
109+
- label
110+
- birth frame
111+
- end frame
112+
- parent
113+
ref_tracks: The ground truth tracks. A (n,4) numpy ndarray with columns:
114+
- label
115+
- birth frame
116+
- end frame
117+
- parent
118+
mapped_ref: The matched labels of the ground truth masks. A list of
119+
length equal to the number of frames. Each element is a list with
120+
the matched labels of the ground truth masks in the respective
121+
frame. The elements are in the same order as the corresponding
122+
elements in mapped_comp.
123+
mapped_comp: The matched labels of the result masks. A list of length
124+
equal to the number of frames. Each element is a list with the
125+
matched labels of the result masks in the respective frame. The
126+
elements are in the same order as the corresponding elements in
127+
mapped_ref.
108128
i: The maximal allowed error in frames.
109129
110130
Returns:

ctc_metrics/metrics/biological/bio.py

+9-3
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,9 @@ def bio(
88
"""
99
Computes the BIO. As described by
1010
[celltrackingchallenge](http://celltrackingchallenge.net/).
11+
It is the average of the CT, TF, BC, and CCA metrics. If a metric is not
12+
available, it is not considered in the average.
13+
1114
1215
Args:
1316
ct: The complete tracking metric.
@@ -16,7 +19,8 @@ def bio(
1619
cca: The cell cycle accuracy metric.
1720
1821
Returns:
19-
The bio metric.
22+
The BIO metric.
23+
2024
"""
2125
total_metrics = 0
2226
if ct is not None:
@@ -35,5 +39,7 @@ def bio(
3539
total_metrics += 1
3640
else:
3741
cca = 0
38-
metric = (ct + tf + bc + cca) / total_metrics
39-
return metric
42+
43+
bio_score = (ct + tf + bc + cca) / total_metrics
44+
return bio_score
45+

ctc_metrics/metrics/biological/cca.py

+14-3
Original file line numberDiff line numberDiff line change
@@ -24,15 +24,26 @@ def is_valid_track(
2424
return valid
2525

2626

27-
def cca(comp_tracks: np.ndarray, ref_tracks: np.ndarray):
27+
def cca(
28+
comp_tracks: np.ndarray,
29+
ref_tracks: np.ndarray
30+
):
2831
"""
2932
Computes the cell cycle accuracy. As described in the paper,
3033
"An objective comparison of cell-tracking algorithms."
3134
- Vladimir Ulman et al., Nature methods 2017
3235
3336
Args:
34-
comp_tracks: The result tracks.
35-
ref_tracks: The ground truth tracks.
37+
comp_tracks: The result tracks. A (n,4) numpy ndarray with columns:
38+
- label
39+
- birth frame
40+
- end frame
41+
- parent
42+
ref_tracks: The ground truth tracks. A (n,4) numpy ndarray with columns:
43+
- label
44+
- birth frame
45+
- end frame
46+
- parent
3647
3748
Returns:
3849
The cell cycle accuracy metric.

ctc_metrics/metrics/biological/ct.py

+23-5
Original file line numberDiff line numberDiff line change
@@ -16,11 +16,29 @@ def ct(
1616
- Vladimir Ulman et al., Nature methods 2017
1717
1818
Args:
19-
comp_tracks: The result tracks.
20-
ref_tracks: The ground truth tracks.
21-
labels_ref: The labels of the ground truth masks.
22-
mapped_ref: The matched labels of the ground truth masks.
23-
mapped_comp: The matched labels of the result masks.
19+
comp_tracks: The result tracks. A (n,4) numpy ndarray with columns:
20+
- label
21+
- birth frame
22+
- end frame
23+
- parent
24+
ref_tracks: The ground truth tracks. A (n,4) numpy ndarray with columns:
25+
- label
26+
- birth frame
27+
- end frame
28+
- parent
29+
labels_ref: The labels of the ground truth masks. A list of length
30+
equal to the number of frames. Each element is a list with the
31+
labels of the ground truth masks in the respective frame.
32+
mapped_ref: The matched labels of the ground truth masks. A list of
33+
length equal to the number of frames. Each element is a list with
34+
the matched labels of the ground truth masks in the respective
35+
frame. The elements are in the same order as the corresponding
36+
elements in mapped_comp.
37+
mapped_comp: The matched labels of the result masks. A list of length
38+
equal to the number of frames. Each element is a list with the
39+
matched labels of the result masks in the respective frame. The
40+
elements are in the same order as the corresponding elements in
41+
mapped_ref.
2442
2543
Returns:
2644
The complete tracks metric.

ctc_metrics/metrics/biological/tf.py

+17-5
Original file line numberDiff line numberDiff line change
@@ -61,10 +61,22 @@ def tf(
6161
- Vladimir Ulman et al., Nature methods 2017
6262
6363
Args:
64-
ref_tracks: The ground truth tracks.
65-
labels_ref: The labels of the ground truth masks.
66-
mapped_ref: The matched labels of the ground truth masks.
67-
mapped_comp: The matched labels of the result masks.
64+
ref_tracks: The ground truth tracks. A (n,4) numpy ndarray with columns:
65+
- label
66+
- birth frame
67+
- end frame
68+
- parent
69+
labels_ref: The labels of the ground truth masks. A list of length
70+
mapped_ref: The matched labels of the ground truth masks. A list of
71+
length equal to the number of frames. Each element is a list with
72+
the matched labels of the ground truth masks in the respective
73+
frame. The elements are in the same order as the corresponding
74+
elements in mapped_comp.
75+
mapped_comp: The matched labels of the result masks. A list of length
76+
equal to the number of frames. Each element is a list with the
77+
matched labels of the result masks in the respective frame. The
78+
elements are in the same order as the corresponding elements in
79+
mapped_ref.
6880
6981
Returns:
7082
The track fractions metric.
@@ -73,7 +85,7 @@ def tf(
7385
ref_tracks, labels_ref, mapped_ref, mapped_comp)
7486
# Calculate the track fractions with respect to the reference tracks
7587
tfs = {k: 0 for k in ref_tracks[:, 0]}
76-
for k, v in sorted(comp_fractions.items()):
88+
for _, v in sorted(comp_fractions.items()):
7789
for k2, v2 in sorted(v.items()):
7890
if tfs[k2] == 1:
7991
continue

ctc_metrics/metrics/clearmot/__init__.py

Whitespace-only changes.

ctc_metrics/metrics/clearmot/mota.py

+67
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,67 @@
1+
import numpy as np
2+
3+
4+
def mota(
    labels_ref: list,
    labels_comp: list,
    mapped_ref: list,
    mapped_comp: list
):
    """
    Computes the MOTA metric. As described in the paper,
    "Evaluating Multiple Object Tracking Performance:
    The CLEAR MOT Metrics."
    - Keni Bernardin and Rainer Stiefelhagen, EURASIP 2008

    Args:
        labels_ref: The labels of the ground truth masks. A list of length
            equal to the number of frames. Each element is a list with the
            labels of the ground truth masks in the respective frame.
        labels_comp: The labels of the computed masks. A list of length equal
            to the number of frames. Each element is a list with the labels of
            the computed masks in the respective frame.
        mapped_ref: The matched labels of the ground truth masks. A list of
            length equal to the number of frames. Each element is a list with
            the matched labels of the ground truth masks in the respective
            frame. The elements are in the same order as the corresponding
            elements in mapped_comp.
        mapped_comp: The matched labels of the result masks. A list of length
            equal to the number of frames. Each element is a list with the
            matched labels of the result masks in the respective frame. The
            elements are in the same order as the corresponding elements in
            mapped_ref.

    Returns:
        A dict with the keys "MOTA", "TP", "FP", "FN", "IDSW",
        "MULTI-ASSIGNMENTS", "Precision" and "Recall".
    """
    tp, fp, fn, idsw, multi_assignments = 0, 0, 0, 0, 0

    # Highest ground-truth label over all frames; `initial=0` keeps np.max
    # from raising ValueError on sequences with no annotated objects.
    max_label_gt = int(np.max(np.concatenate(labels_ref), initial=0))
    # matches[r] caches the computed label last assigned to reference label r
    # (0 = never matched); used to detect identity switches across frames.
    matches = np.zeros(max_label_gt + 1)
    for ref, comp, m_ref, m_comp in zip(
            labels_ref, labels_comp, mapped_ref, mapped_comp):
        # Calculate metrics. A computed label matched to k > 1 references
        # contributes k - 1 extra (multi-) assignments, counted as FP.
        _, counts = np.unique(m_comp, return_counts=True)
        tp += len(m_ref)
        fn += len(ref) - len(m_ref)
        fp += len(comp) - len(m_comp) + np.sum(counts[counts > 1] - 1)
        multi_assignments += np.sum(counts[counts > 1] - 1)
        # An IDSW occurs when a previously matched reference label is now
        # matched to a different computed label.
        idsw += np.sum((matches[m_ref] != m_comp) & (matches[m_ref] != 0))
        # Update the match cache
        matches[m_ref] = m_comp

    # Guard the ratios: sequences without ground truth (tp + fn == 0) or
    # without any detections (tp + fp == 0) must not raise ZeroDivisionError.
    gt_total = tp + fn
    if gt_total > 0:
        mota_score = 1 - (fn + fp + idsw + multi_assignments) / gt_total
        recall = tp / gt_total
    else:
        mota_score = 0.0
        recall = 0.0
    precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0

    res = {
        "MOTA": mota_score,
        "TP": tp,
        "FP": fp,
        "FN": fn,
        "IDSW": idsw,
        "MULTI-ASSIGNMENTS": multi_assignments,
        "Precision": precision,
        "Recall": recall
    }
    return res

ctc_metrics/metrics/hota/__init__.py

Whitespace-only changes.

0 commit comments

Comments
 (0)