Commit e867803

Merge branch 'main' into clearmot_and_hota
2 parents c0c6968 + d2e51f8 commit e867803

7 files changed: +345 -3 lines changed

README.md (+6 -1)

@@ -40,6 +40,7 @@ pip install .
 
 ## Usage
 
+### Validation
 
 The package supports validation, evaluation, and visualization of tracking
 results. The following examples are shown for an example directory that is
@@ -107,6 +108,7 @@ Per default, the code is executed using multiple processes with one process per
 available CPU core. Multiprocessing increases the execution time but also
 the memory consumption. If you need to set the maximal number of processes,
 the number of processes can be specified with the argument
+
 ```--num-threads``` or ```-n```:
 ```bash
 ctc_evaluate --gt "/ctc/train" --res "/ctc/train" -r -n 4
@@ -129,7 +131,7 @@ The following table shows the available arguments:
 | --csv-file | Path to a csv file to save the results. | None |
 | --num-threads | Number of threads to use for evaluation. | 1 |
 
-Per default, all given metrics are evaluated. Aou can also select the metrics
+Per default, all given metrics are evaluated. You can also select the metrics
 you are interested in to avoid the calculation of metrics that are not in your
 interest. Additional arguments to select a subset of specific metrics are:
 
@@ -180,6 +182,7 @@ print(res["TRA"])
 
 You can visualize your tracking results with the following command:
 
+
 ```bash
 ctc_visualize --img "/ctc/train/challenge_x/01" --res "/ctc/train/challenge_x/01_RES"
 ```
@@ -203,6 +206,7 @@ There are additional arguments that can be used to specify the visualization.
 The following table shows the available arguments:
 
 
+
 | Argument | Description | Default |
 |-------------------|------------------------------------------------------------------------------------------|---------|
 | --img | The directory to the images **(required)** | |
@@ -211,6 +215,7 @@ The following table shows the available arguments:
 | --video-name | The path to the video if a video should be created | None |
 | --border-width | The width of the border. Either an integer or a string that describes the challenge name | None |
 | --show-no-labels | Print no instance labels to the output as default | False |
+
 | --show-no-parents | Print no parent labels to the output as default | False |
 | --ids-to-show | The IDs of the instances to show. If defined, all others will be ignored. | None |
 | --start-frame | The frame to start the visualization | 0 |
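
The README snippets above drive the evaluation through the `ctc_evaluate` CLI; the same result dictionary can be obtained from Python via the `evaluate_sequence` function this commit touches. A minimal sketch, assuming the function needs only the two paths named in its docstring and that the `01_GT`/`01_RES` directory names follow the usual CTC layout (both assumptions, not confirmed by this diff):

```python
# Sketch only: evaluate_sequence lives in ctc_metrics/scripts/evaluate.py in
# this commit; its docstring documents the res/gt paths, and the README's
# `print(res["TRA"])` example suggests it returns a dict keyed by metric name.
from ctc_metrics.scripts.evaluate import evaluate_sequence

results = evaluate_sequence(
    res="/ctc/train/challenge_x/01_RES",  # tracking results (placeholder path)
    gt="/ctc/train/challenge_x/01_GT",    # ground truth (assumed CTC naming)
)
print(results["TRA"])
```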

assets/visualization.jpg (binary image, 97.7 KB)
ctc_metrics/metrics/__init__.py (+2 -1)

@@ -12,6 +12,7 @@
 from ctc_metrics.metrics.identity_metrics.idf1 import idf1
 from ctc_metrics.metrics.others.mt_ml import mtml
 from ctc_metrics.metrics.others.faf import faf
+
 from ctc_metrics.metrics.technical.op_ctb import op_ctb
 from ctc_metrics.metrics.technical.op_csb import op_csb
 from ctc_metrics.metrics.biological.bio import bio
@@ -21,4 +22,4 @@
 ALL_METRICS = [
     "Valid", "BC", "CT", "CCA", "TF", "SEG", "TRA", "DET", "MOTA", "HOTA",
     "CHOTA", "IDF1", "MTML", "FAF", "LNK", "OP_CTB", "OP_CSB", "BIO", "OP_CLB"
-]
\ No newline at end of file
+]
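
`ALL_METRICS` enumerates every metric key the evaluator can report, now including the merged `HOTA`/`CHOTA` entries. A small sketch of selecting a subset from it (the grouping below is illustrative, not an API of the package):

```python
from ctc_metrics.metrics import ALL_METRICS

# Illustrative grouping, not part of the package API: keep only the
# biologically inspired metrics, preserving their order in ALL_METRICS.
biological = [m for m in ALL_METRICS if m in {"BC", "CT", "CCA", "TF", "BIO"}]
print(biological)  # ['BC', 'CT', 'CCA', 'TF', 'BIO']
```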

ctc_metrics/metrics/biological/bio.py (+4)

@@ -11,6 +11,7 @@ def bio(
     It is the average of the CT, TF, BC, and CCA metrics. If a metric is not
     available, it is not considered in the average.
 
+
     Args:
         ct: The complete tracking metric.
         tf: The track fractions metric.
@@ -19,6 +20,7 @@ def bio(
 
     Returns:
         The BIO metric.
+
     """
     total_metrics = 0
     if ct is not None:
@@ -37,5 +39,7 @@
         total_metrics += 1
     else:
         cca = 0
+
     bio_score = (ct + tf + bc + cca) / total_metrics
     return bio_score
+
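
Only the `cca` fallback branch is visible in the hunks above; the docstring's rule that unavailable metrics are skipped implies the other inputs are handled the same way. A hedged sketch of the resulting behaviour, assuming the `ct`/`tf`/`bc` branches mirror the `cca` branch shown:

```python
from ctc_metrics.metrics.biological.bio import bio

# Assumption: like the cca branch in this diff, a None input is replaced by 0
# and excluded from total_metrics, so only available metrics enter the average.
score = bio(ct=0.8, tf=0.9, bc=None, cca=None)
print(score)  # (0.8 + 0.9 + 0 + 0) / 2 == 0.85
```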

ctc_metrics/scripts/evaluate.py (+8 -1)

@@ -26,6 +26,7 @@ def match_computed_to_reference_masks(
     Args:
         ref_masks: The reference masks. A list of paths to the reference masks.
         comp_masks: The computed masks. A list of paths to the computed masks.
+
         threads: The number of threads to use. If 0, the number of threads
             is set to the number of available CPUs.
 
@@ -74,7 +75,7 @@ def load_data(
         segmentation_data: True,
         threads: int = 0,
 ):
-    """
+    """
     Load data that is necessary to calculate metrics from the given directories.
 
     Args:
@@ -100,13 +101,15 @@ def load_data(
     assert len(ref_tra_masks) == len(comp_masks), (
         f"{res}: Number of result masks ({len(comp_masks)}) unequal to "
        f"the number of ground truth masks ({len(ref_tra_masks)})!)")
+
     # Match golden truth tracking masks to result masks
     traj = {}
     is_valid = 1
     if trajectory_data:
         traj = match_computed_to_reference_masks(
             ref_tra_masks, comp_masks, threads=threads)
         is_valid = valid(comp_masks, comp_tracks, traj["labels_comp"])
+
     # Match golden truth segmentation masks to result masks
     segm = {}
     if segmentation_data:
@@ -118,6 +121,7 @@ def load_data(
         ]
         segm = match_computed_to_reference_masks(
             ref_seg_masks, _res_masks, threads=threads)
+
     return comp_tracks, ref_tracks, traj, segm, comp_masks, is_valid
 
 
@@ -264,6 +268,7 @@ def calculate_metrics(
         results.update(faf(
             traj["labels_comp_merged"], traj["mapped_comp_merged"]))
 
+
     return results
 
 
@@ -276,6 +281,7 @@ def evaluate_sequence(
     """
     Evaluates a single sequence.
 
+
     Args:
         res: The path to the results.
         gt: The path to the ground truth.
@@ -360,6 +366,7 @@ def parse_args():
     parser.add_argument('--chota', action="store_true")
     parser.add_argument('--mtml', action="store_true")
    parser.add_argument('--faf', action="store_true")
+
     parser.add_argument('--lnk', action="store_true")
     args = parser.parse_args()
     return args
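
The `return` statement visible in the `load_data` hunks fixes its full output tuple, so the loader can be exercised directly. A minimal sketch, assuming the argument names implied by the signature fragments above (`gt` and the directory names are assumptions; paths are placeholders):

```python
from ctc_metrics.scripts.evaluate import load_data

# Sketch based on the signature and return statement visible in this diff:
# load_data(...) returns the tuple unpacked below, with is_valid set by valid()
# when trajectory data is matched against the computed masks.
comp_tracks, ref_tracks, traj, segm, comp_masks, is_valid = load_data(
    res="/ctc/train/challenge_x/01_RES",  # placeholder result directory
    gt="/ctc/train/challenge_x/01_GT",    # placeholder ground-truth directory
    trajectory_data=True,                 # match tracking (TRA) masks
    segmentation_data=True,               # match segmentation (SEG) masks
    threads=4,                            # 0 would use all available CPUs
)
print(is_valid)
```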
