
support evaluation on vehicleid #38


Open
wants to merge 14 commits into master
7 changes: 5 additions & 2 deletions openunreid/apis/runner.py
@@ -301,7 +301,10 @@ def val(self):
self._rank,
print_freq=self.print_freq,
)
better_mAP = max(better_mAP, mAP)
if self.cfg.TRAIN.num_repeat != 1:
better_mAP = max(better_mAP, cmc[0])
else:
better_mAP = max(better_mAP, mAP)

return better_mAP
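The new branch switches the model-selection metric for the repeated VehicleID protocol: when TRAIN.num_repeat is not 1, checkpoints are ranked by CMC top-1 instead of mAP. A minimal sketch of that selection rule, assuming cmc[0] holds rank-1 accuracy; the helper name is illustrative and not part of this diff:

```python
def selection_metric(cfg, cmc, mAP):
    # Repeated VehicleID evaluation: rank checkpoints by CMC top-1,
    # since the mAP here reflects only a single gallery split.
    if cfg.TRAIN.num_repeat != 1:
        return cmc[0]
    # Default single-pass protocol: keep the usual mAP-based selection.
    return mAP
```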

@@ -311,7 +314,7 @@ def save(self, mAP=None):
self._best_mAP = max(self._best_mAP, mAP)
print(
bcolors.OKGREEN
+ "\n * Finished epoch {:3d} mAP: {:5.1%} best: {:5.1%}{}\n".format(
+ "\n * Finished epoch {:3d} current: {:5.1%} best: {:5.1%}{}\n".format(
Contributor:
"current" here is ambiguous; please specify whether the metric is mAP or CMC.

self._epoch, mAP, self._best_mAP, " *" if is_best else ""
)
+ bcolors.ENDC
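One possible way to address the review comment above, sketched here rather than taken from the diff: name the metric explicitly in the log line. `metric_name` is a hypothetical local variable; everything else comes from the surrounding save() code.

```python
# Sketch only: the diff itself still prints "current".
metric_name = "CMC top-1" if self.cfg.TRAIN.num_repeat != 1 else "mAP"
print(
    bcolors.OKGREEN
    + "\n * Finished epoch {:3d} {}: {:5.1%} best: {:5.1%}{}\n".format(
        self._epoch, metric_name, mAP, self._best_mAP, " *" if is_best else ""
    )
    + bcolors.ENDC
)
```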
47 changes: 35 additions & 12 deletions openunreid/apis/test.py
@@ -8,8 +8,9 @@

import numpy as np
import torch
import torchvision

from .train import set_random_seed
from openunreid.data import build_test_dataloader
from ..core.metrics.rank import evaluate_rank
from ..core.utils.compute_dist import build_dist
from ..models.utils.dsbn_utils import switch_target_bn
@@ -26,11 +27,9 @@

@torch.no_grad()
def test_reid(
cfg, model, data_loader, query, gallery, dataset_name=None, rank=None, **kwargs
cfg, model, data_loader, query, gallery, dataset_name=None, num=1, rank=None, **kwargs
):

start_time = time.monotonic()

if cfg.MODEL.dsbn:
assert (
dataset_name is not None
@@ -47,7 +46,7 @@ def test_reid(

sep = "*******************************"
if dataset_name is not None:
print(f"\n{sep} Start testing {dataset_name} {sep}\n")
print(f"\n{sep} Start testing {dataset_name} {-num} {sep}\n")

if rank is None:
rank, _, _ = get_dist_info()
@@ -78,7 +77,7 @@

# evaluate with original distance
dist = build_dist(cfg.TEST, query_features, gallery_features)
cmc, map = evaluate_rank(dist, q_pids, g_pids, q_cids, g_cids)
cmc, map = evaluate_rank(cfg, dist, q_pids, g_pids, q_cids, g_cids)
else:
cmc, map = np.empty(50), 0.0

@@ -98,14 +97,10 @@
# dist_gg = build_dist(cfg, gallery_features, gallery_features)
# final_dist = re_ranking_cpu(dist, dist_qq, dist_gg)

cmc, map = evaluate_rank(final_dist, q_pids, g_pids, q_cids, g_cids)
cmc, map = evaluate_rank(cfg, final_dist, q_pids, g_pids, q_cids, g_cids)
else:
cmc, map = np.empty(50), 0.0

end_time = time.monotonic()
print("Testing time: ", timedelta(seconds=end_time - start_time))
print(f"\n{sep} Finished testing {sep}\n")

return cmc, map


@@ -142,7 +137,7 @@ def val_reid(
# evaluate with original distance
if rank == 0:
dist = build_dist(cfg.TEST, features)
cmc, map = evaluate_rank(dist, pids, pids, cids, cids)
cmc, map = evaluate_rank(cfg, dist, pids, pids, cids, cids)
else:
cmc, map = np.empty(50), 0.0

@@ -207,3 +202,31 @@ def infer_gan(
print(f"\n{sep} Finished translating {sep}\n")

return


@torch.no_grad()
def final_test(cfg, model, cmc_topk=(1, 5, 10)):
Contributor:
The name "final_test" is somewhat confusing. Maybe you could rename the original "test_reid" to "test_reid_once" and "final_test" to "test_reid".

sep = "*******************************"
start_time = time.monotonic()

all_cmc = []
all_mAP = []
for num in range(cfg.TRAIN.num_repeat):
set_random_seed(num + 1, cfg.TRAIN.deterministic)
test_loaders, queries, galleries = build_test_dataloader(cfg)
for i, (loader, query, gallery) in enumerate(zip(test_loaders, queries, galleries)):
cmc, mAP = test_reid(
cfg, model, loader, query, gallery, dataset_name=cfg.TEST.datasets[i], num=num+1
)
all_cmc.append(cmc)
all_mAP.append(mAP)

if cfg.TRAIN.num_repeat != 1:
print("\n ")
print("Average CMC Scores:")
for k in cmc_topk:
print(" top-{:<4}{:12.1%}".format(k, np.mean(all_cmc, axis=0)[k - 1]))

end_time = time.monotonic()
print("Testing time: ", timedelta(seconds=end_time - start_time))
print(f"\n{sep} Finished testing {sep}\n")
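A sketch of the renaming suggested in the review comment above: the single-pass routine becomes test_reid_once and the repeat loop takes over the name test_reid. The mAP averaging at the end is an addition of this sketch, not part of the diff; torch, np, set_random_seed and build_test_dataloader are the same ones imported above.

```python
@torch.no_grad()
def test_reid(cfg, model, cmc_topk=(1, 5, 10)):
    all_cmc, all_mAP = [], []
    for num in range(cfg.TRAIN.num_repeat):
        # VehicleID protocol: re-sample the gallery with a fresh seed each round.
        set_random_seed(num + 1, cfg.TRAIN.deterministic)
        test_loaders, queries, galleries = build_test_dataloader(cfg)
        for i, (loader, query, gallery) in enumerate(
            zip(test_loaders, queries, galleries)
        ):
            cmc, mAP = test_reid_once(  # the current test_reid, renamed
                cfg, model, loader, query, gallery,
                dataset_name=cfg.TEST.datasets[i], num=num + 1,
            )
            all_cmc.append(cmc)
            all_mAP.append(mAP)

    print("Average mAP over {} repeats: {:4.1%}".format(
        cfg.TRAIN.num_repeat, np.mean(all_mAP)))
    print("Average CMC Scores:")
    for k in cmc_topk:
        print("  top-{:<4}{:12.1%}".format(k, np.mean(all_cmc, axis=0)[k - 1]))
    return all_cmc, all_mAP
```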
9 changes: 4 additions & 5 deletions openunreid/core/metrics/rank.py
@@ -5,6 +5,8 @@

import numpy as np

from openunreid.utils.logger import display

try:
from .rank_cylib.rank_cy import evaluate_cy

@@ -162,6 +164,7 @@ def evaluate_py(


def evaluate_rank(
cfg,
distmat,
q_pids,
g_pids,
@@ -202,10 +205,6 @@
)

if verbose:
print("\n")
print("Mean AP: {:4.1%}".format(map))
print("CMC Scores:")
for k in cmc_topk:
print(" top-{:<4}{:12.1%}".format(k, cmc[k - 1]))
display(cfg, map, cmc, cmc_topk)

return cmc, map
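A minimal usage sketch of the updated signature: cfg is now threaded through so the display helper (see openunreid/utils/logger.py below) can decide whether to print averaged CMC, or mAP plus CMC. Variable names mirror the call sites in test.py above.

```python
dist = build_dist(cfg.TEST, query_features, gallery_features)
cmc, mAP = evaluate_rank(cfg, dist, q_pids, g_pids, q_cids, g_cids)
```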
1 change: 0 additions & 1 deletion openunreid/core/metrics/rank_cylib/rank_cy.pyx
@@ -4,7 +4,6 @@ import cython
import numpy as np
cimport numpy as np
from collections import defaultdict
import random


"""
7 changes: 3 additions & 4 deletions openunreid/data/datasets/vehicleid.py
@@ -2,9 +2,7 @@

import os.path as osp
import random
import shutil
import warnings
from collections import defaultdict

from ..utils.base_dataset import ImageDataset

@@ -34,6 +32,7 @@ def __init__(
):
self.root = osp.abspath(osp.expanduser(root))
self.dataset_dir = osp.join(self.root, self.dataset_dir)
self.mode = mode
self.del_labels = del_labels
self.download_dataset(self.dataset_dir, self.dataset_url)
assert (val_split > 0.0) and (
@@ -108,7 +107,6 @@ def process_split(self, list_path, data_range, relabel=False):
list_data = f.readlines()
for data in list_data:
name, pid = data.strip().split(" ")
# pid = int(pid)
if pid == -1:
continue # junk images are just ignored
pid_container.add(pid)
@@ -123,13 +121,14 @@
pid2label = {pid: label for label, pid in enumerate(pid_container)}

data = []
camid = 0
for ld in list_data:
name, pid = ld.strip().split(" ")
if (pid not in pid_container) or (pid == -1):
continue

img_path = osp.join(self.img_dir, name + ".jpg")
camid = 0
camid += 1
if not self.del_labels:
if relabel:
pid = pid2label[pid]
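A note on the camid handling above, shown as a standalone sketch: VehicleID images carry no camera annotations, so the loop gives every image its own pseudo camera id; the usual motivation is that the ranking code drops gallery entries sharing both pid and camid with the query, which would discard all correct matches if every image used camid 0. The helper below is hypothetical and only mirrors the logic in the diff.

```python
def assign_unique_camids(samples):
    """samples: list of (img_path, pid) pairs without camera labels."""
    data = []
    camid = 0
    for img_path, pid in samples:
        camid += 1  # one pseudo-camera per image
        data.append((img_path, pid, camid))
    return data
```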
16 changes: 16 additions & 0 deletions openunreid/utils/logger.py
@@ -1,6 +1,8 @@
import os
import sys

import numpy as np

from .dist_utils import get_dist_info, synchronize
from .file_utils import mkdir_if_missing

@@ -42,3 +44,17 @@ def close(self):
self.console.close()
if self.file is not None:
self.file.close()


def display(cfg, map, cmc, cmc_topk=(1, 5, 10)):
if cfg.TRAIN.num_repeat != 1:
print("\n")
print("CMC Scores:")
for k in cmc_topk:
print(" top-{:<4}{:12.1%}".format(k, cmc[k - 1]))
else:
print("\n")
print("Mean AP: {:4.1%}".format(np.mean(map)))
print("CMC Scores:")
for k in cmc_topk:
print(" top-{:<4}{:12.1%}".format(k, cmc[k - 1]))
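The two branches of display() above share the CMC loop; a slightly condensed variant is sketched here only to illustrate the intent (averaged CMC for the repeated protocol, mAP plus CMC otherwise). This is not the code in the diff.

```python
def display(cfg, map, cmc, cmc_topk=(1, 5, 10)):
    print("\n")
    if cfg.TRAIN.num_repeat == 1:
        # Single-pass protocol: mAP is meaningful, print it as before.
        print("Mean AP: {:4.1%}".format(np.mean(map)))
    print("CMC Scores:")
    for k in cmc_topk:
        print("  top-{:<4}{:12.1%}".format(k, cmc[k - 1]))
```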
3 changes: 3 additions & 0 deletions tools/CycleGAN/config.yaml
@@ -50,6 +50,9 @@ TRAIN:
datasets: {'market1501': 'trainval', 'dukemtmcreid': 'trainval'}
unsup_dataset_indexes: [1,]

# number of repeated evaluations
num_repeat: 10 # 10 only for vehicleid dataset, otherwise 1

epochs: 50
iters: 200

3 changes: 0 additions & 3 deletions tools/CycleGAN/main.py
@@ -7,18 +7,15 @@
from pathlib import Path

import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel

from openunreid.apis import GANBaseRunner, set_random_seed, infer_gan
from openunreid.core.solvers import build_lr_scheduler, build_optimizer
from openunreid.data import (
build_test_dataloader,
build_train_dataloader,
build_val_dataloader,
)
from openunreid.models import build_gan_model
from openunreid.models.losses import build_loss
from openunreid.models.utils.extract import extract_features
from openunreid.utils.config import (
cfg,
cfg_from_list,
3 changes: 3 additions & 0 deletions tools/MMT/config.yaml
@@ -61,6 +61,9 @@ TRAIN:
datasets: {'market1501': 'trainval', 'dukemtmcreid': 'trainval'}
unsup_dataset_indexes: [0,]

# number of repeated evaluations
num_repeat: 1 # 10 only for vehicleid dataset, otherwise 1

epochs: 50
iters: 400

21 changes: 6 additions & 15 deletions tools/MMT/main.py
@@ -8,6 +8,7 @@
import torch

from openunreid.apis import BaseRunner, batch_processor, test_reid, set_random_seed
from openunreid.apis.test import final_test
from openunreid.core.metrics.accuracy import accuracy
from openunreid.core.solvers import build_lr_scheduler, build_optimizer
from openunreid.data import build_test_dataloader, build_train_dataloader
@@ -198,21 +199,11 @@ def main():
runner.resume(cfg.work_dir / "model_best.pth")

# final testing
test_loaders, queries, galleries = build_test_dataloader(cfg)
for i, (loader, query, gallery) in enumerate(zip(test_loaders, queries, galleries)):

for idx in range(len(runner.model)):
print("==> Test on the no.{} model".format(idx))
# test_reid() on self.model[idx] will only evaluate the 'mean_net'
# for testing 'net', use self.model[idx].module.net
cmc, mAP = test_reid(
cfg,
runner.model[idx],
loader,
query,
gallery,
dataset_name=cfg.TEST.datasets[i],
)
for idx in range(len(runner.model)):
print("==> Test on the no.{} model".format(idx))
# test_reid() on self.model[idx] will only evaluate the 'mean_net'
# for testing 'net', use self.model[idx].module.net
Contributor:
The comments above still describe test_reid(); they need to be updated for final_test().

final_test(cfg, runner.model[idx])

# print time
end_time = time.monotonic()
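Per the review comment above, the inline comments should follow the switch from test_reid to final_test. A possible rewording, sketched here using only the names already present in the diff:

```python
for idx in range(len(runner.model)):
    print("==> Test on the no.{} model".format(idx))
    # final_test() on runner.model[idx] evaluates only the 'mean_net';
    # to test the raw 'net', pass runner.model[idx].module.net instead.
    final_test(cfg, runner.model[idx])
```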
3 changes: 3 additions & 0 deletions tools/SPGAN/config.yaml
@@ -50,6 +50,9 @@ TRAIN:
datasets: {'market1501': 'trainval', 'dukemtmcreid': 'trainval'}
unsup_dataset_indexes: [1,]

# number of repeated evaluations
num_repeat: 10 # 10 only for vehicleid dataset, otherwise 1
Contributor:
Shouldn't this be "1"? The datasets in this config are not VehicleID.


epochs: 50
iters: 200

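Related to the review comment above: since only the VehicleID protocol needs num_repeat: 10, a small config sanity check could catch copy-paste slips like this one. A hedged sketch with a made-up helper name; cfg is assumed to follow the YAML layout used in these files, with TEST.datasets listing the test dataset names.

```python
import warnings


def check_num_repeat(cfg):
    # num_repeat > 1 implements the repeated VehicleID gallery sampling;
    # warn if it is combined with any other test dataset.
    if cfg.TRAIN.num_repeat != 1 and "vehicleid" not in cfg.TEST.datasets:
        warnings.warn(
            "TRAIN.num_repeat={} is intended for the VehicleID protocol; "
            "use 1 for other datasets.".format(cfg.TRAIN.num_repeat)
        )
```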
5 changes: 4 additions & 1 deletion tools/SpCL/config.yaml
@@ -61,6 +61,9 @@ TRAIN:
# datasets: {'market1501': 'trainval', 'dukemtmcreid': 'trainval'}
unsup_dataset_indexes: [0,]

# number of repeated evaluations
num_repeat: 1 # 10 only for vehicleid dataset, otherwise 1

epochs: 50
iters: 400

@@ -71,7 +74,7 @@ TRAIN:

# validate
val_dataset: 'market1501'
val_freq: 5
val_freq: 1

# sampler
SAMPLER:
10 changes: 3 additions & 7 deletions tools/SpCL/main.py
@@ -9,10 +9,10 @@
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel

from openunreid.apis import BaseRunner, batch_processor, test_reid, set_random_seed
from openunreid.apis import BaseRunner, batch_processor, set_random_seed
from openunreid.apis.test import final_test
from openunreid.core.solvers import build_lr_scheduler, build_optimizer
from openunreid.data import (
build_test_dataloader,
build_train_dataloader,
build_val_dataloader,
)
@@ -265,11 +265,7 @@ def main():
runner.resume(cfg.work_dir / "model_best.pth")

# final testing
test_loaders, queries, galleries = build_test_dataloader(cfg)
for i, (loader, query, gallery) in enumerate(zip(test_loaders, queries, galleries)):
cmc, mAP = test_reid(
cfg, model, loader, query, gallery, dataset_name=cfg.TEST.datasets[i]
)
final_test(cfg, model)

# print time
end_time = time.monotonic()
3 changes: 3 additions & 0 deletions tools/UDA_TP/config.yaml
@@ -61,6 +61,9 @@ TRAIN:
datasets: {'market1501': 'trainval',}
unsup_dataset_indexes: [0,]

# number of repeated evaluations
num_repeat: 1 # 10 only for vehicleid dataset, otherwise 1

epochs: 50
iters: 400

7 changes: 2 additions & 5 deletions tools/UDA_TP/main.py
@@ -8,6 +8,7 @@
import torch

from openunreid.apis import BaseRunner, test_reid, set_random_seed
from openunreid.apis.test import final_test
from openunreid.core.solvers import build_lr_scheduler, build_optimizer
from openunreid.data import build_test_dataloader, build_train_dataloader
from openunreid.models import build_model
@@ -130,11 +131,7 @@ def main():
runner.resume(cfg.work_dir / "model_best.pth")

# final testing
test_loaders, queries, galleries = build_test_dataloader(cfg)
for i, (loader, query, gallery) in enumerate(zip(test_loaders, queries, galleries)):
cmc, mAP = test_reid(
cfg, model, loader, query, gallery, dataset_name=cfg.TEST.datasets[i]
)
final_test(cfg, model)

# print time
end_time = time.monotonic()
3 changes: 3 additions & 0 deletions tools/source_pretrain/config.yaml
@@ -59,6 +59,9 @@ TRAIN:
datasets: {'market1501': 'trainval',}
unsup_dataset_indexes: null

# number of repeated evaluations
num_repeat: 1 # 10 only for vehicleid dataset, otherwise 1

epochs: 120
iters: 200
