Commit 474e446

Commit message: lint and so on

Parent: f6b6d40
4 files changed, 2 insertions(+), 18 deletions(-)

  README.md                              (-5)
  ctc_metrics/metrics/__init__.py        (+1, -2)
  ctc_metrics/metrics/biological/bio.py  (-4)
  ctc_metrics/scripts/evaluate.py        (+1, -7)

README.md (-5)

````diff
@@ -40,7 +40,6 @@ pip install .
 
 ## Usage
 
-### Validation
 
 The package supports validation, evaluation, and visualization of tracking
 results. The following examples are shown for an example directory that is
@@ -108,7 +107,6 @@ Per default, the code is executed using multiple processes with one process per
 available CPU core. Multiprocessing increases the execution time but also
 the memory consumption. If you need to set the maximal number of processes,
 the number of processes can be specified with the argument
-
 ```--num-threads``` or ```-n```:
 ```bash
 ctc_evaluate --gt "/ctc/train" --res "/ctc/train" -r -n 4
@@ -182,7 +180,6 @@ print(res["TRA"])
 
 You can visualize your tracking results with the following command:
 
-
 ```bash
 ctc_visualize --img "/ctc/train/challenge_x/01" --res "/ctc/train/challenge_x/01_RES"
 ```
@@ -206,7 +203,6 @@ There are additional arguments that can be used to specify the visualization.
 The following table shows the available arguments:
 
 
-
 | Argument | Description | Default |
 |-------------------|------------------------------------------------------------------------------------------|---------|
 | --img | The directory to the images **(required)** | |
@@ -215,7 +211,6 @@ The following table shows the available arguments:
 | --video-name | The path to the video if a video should be created | None |
 | --border-width | The width of the border. Either an integer or a string that describes the challenge name | None |
 | --show-no-labels | Print no instance labels to the output as default | False |
-
 | --show-no-parents | Print no parent labels to the output as default | False |
 | --ids-to-show | The IDs of the instances to show. If defined, all others will be ignored. | None |
 | --start-frame | The frame to start the visualization | 0 |
````
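The third hunk's context line, `print(res["TRA"])`, comes from the README's Python example. For orientation, a minimal sketch of how such a result dictionary could be obtained through the package's Python API; the import path, the assumption that `evaluate_sequence` (whose docstring appears in the `evaluate.py` diff below) returns that dictionary, and the directory paths are all unverified assumptions:

```python
# Sketch only: `evaluate_sequence` and its `res`/`gt` parameters are visible in
# ctc_metrics/scripts/evaluate.py below, but the import path, the return value,
# and the directory layout are assumptions made for illustration.
from ctc_metrics.scripts.evaluate import evaluate_sequence

res = evaluate_sequence(
    res="/ctc/train/challenge_x/01_RES",  # hypothetical tracking results
    gt="/ctc/train/challenge_x/01_GT",    # hypothetical ground truth
)
print(res["TRA"])  # the line quoted in the hunk header above
```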

ctc_metrics/metrics/__init__.py (+1, -2)

```diff
@@ -12,7 +12,6 @@
 from ctc_metrics.metrics.identity_metrics.idf1 import idf1
 from ctc_metrics.metrics.others.mt_ml import mtml
 from ctc_metrics.metrics.others.faf import faf
-
 from ctc_metrics.metrics.technical.op_ctb import op_ctb
 from ctc_metrics.metrics.technical.op_csb import op_csb
 from ctc_metrics.metrics.biological.bio import bio
@@ -22,4 +21,4 @@
 ALL_METRICS = [
     "Valid", "BC", "CT", "CCA", "TF", "SEG", "TRA", "DET", "MOTA", "HOTA",
     "CHOTA", "IDF1", "MTML", "FAF", "LNK", "OP_CTB", "OP_CSB", "BIO", "OP_CLB"
-]
+]
```

ctc_metrics/metrics/biological/bio.py (-4)

```diff
@@ -11,7 +11,6 @@ def bio(
     It is the average of the CT, TF, BC, and CCA metrics. If a metric is not
     available, it is not considered in the average.
 
-
     Args:
         ct: The complete tracking metric.
         tf: The track fractions metric.
@@ -20,7 +19,6 @@ def bio(
 
     Returns:
         The BIO metric.
-
     """
     total_metrics = 0
     if ct is not None:
@@ -39,7 +37,5 @@ def bio(
         total_metrics += 1
     else:
         cca = 0
-
     bio_score = (ct + tf + bc + cca) / total_metrics
     return bio_score
-
```
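The three hunks above only strip blank lines, but together they expose the whole BIO computation: each of CT, TF, BC, and CCA enters the average only if it is available; missing metrics are replaced by 0 while the divisor counts only the available ones. A standalone sketch of that pattern, mirroring the lines visible in the diff (not the library function itself):

```python
def bio_sketch(ct=None, tf=None, bc=None, cca=None):
    """Average of the available metrics, mirroring the hunks above."""
    total_metrics = 0
    available = []
    for metric in (ct, tf, bc, cca):
        if metric is not None:
            available.append(metric)
            total_metrics += 1
    # Unavailable metrics contribute nothing and are excluded from the divisor.
    return sum(available) / total_metrics


# With CCA unavailable, only three metrics enter the average:
assert bio_sketch(ct=0.9, tf=0.8, bc=0.7) == (0.9 + 0.8 + 0.7) / 3
```

Like the lines in the hunk, the sketch divides by `total_metrics`, so it would raise `ZeroDivisionError` if every metric were unavailable.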

ctc_metrics/scripts/evaluate.py (+1, -7)

```diff
@@ -26,7 +26,6 @@ def match_computed_to_reference_masks(
     Args:
         ref_masks: The reference masks. A list of paths to the reference masks.
         comp_masks: The computed masks. A list of paths to the computed masks.
-
         threads: The number of threads to use. If 0, the number of threads
             is set to the number of available CPUs.
 
@@ -75,7 +74,7 @@ def load_data(
     segmentation_data: True,
     threads: int = 0,
 ):
-    """
+    """
     Load data that is necessary to calculate metrics from the given directories.
 
     Args:
@@ -101,15 +100,13 @@
     assert len(ref_tra_masks) == len(comp_masks), (
         f"{res}: Number of result masks ({len(comp_masks)}) unequal to "
         f"the number of ground truth masks ({len(ref_tra_masks)})!)")
-
     # Match golden truth tracking masks to result masks
     traj = {}
     is_valid = 1
     if trajectory_data:
         traj = match_computed_to_reference_masks(
             ref_tra_masks, comp_masks, threads=threads)
         is_valid = valid(comp_masks, comp_tracks, traj["labels_comp"])
-
     # Match golden truth segmentation masks to result masks
     segm = {}
     if segmentation_data:
@@ -121,7 +118,6 @@ def load_data(
         ]
         segm = match_computed_to_reference_masks(
             ref_seg_masks, _res_masks, threads=threads)
-
     return comp_tracks, ref_tracks, traj, segm, comp_masks, is_valid
 
 
@@ -281,7 +277,6 @@ def evaluate_sequence(
     """
     Evaluates a single sequence.
 
-
     Args:
         res: The path to the results.
         gt: The path to the ground truth.
@@ -366,7 +361,6 @@ def parse_args():
     parser.add_argument('--chota', action="store_true")
     parser.add_argument('--mtml', action="store_true")
     parser.add_argument('--faf', action="store_true")
-
     parser.add_argument('--lnk', action="store_true")
     args = parser.parse_args()
     return args
```
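The docstring touched by the first hunk says that `threads=0` falls back to one process per available CPU core, which is also how the README describes `--num-threads`/`-n`. A generic sketch of that fallback convention, not necessarily the library's actual implementation:

```python
import multiprocessing


def resolve_threads(threads: int = 0) -> int:
    """Interpret 0 as 'use one process per available CPU core'."""
    return threads if threads > 0 else multiprocessing.cpu_count()


print(resolve_threads(0))  # CPU count: the documented default behaviour
print(resolve_threads(4))  # explicit limit, as with `ctc_evaluate ... -n 4`
```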
