From c92cec86e4d31daabe8b336fa7067e3b0bb7ca89 Mon Sep 17 00:00:00 2001 From: Sourcery AI Date: Tue, 30 Mar 2021 09:18:33 +0000 Subject: [PATCH] 'Refactored by Sourcery' --- demo.py | 4 +- .../core/engine/abstract/abstract_engine.py | 3 +- pyanomaly/core/engine/abstract/base_engine.py | 22 +-- pyanomaly/core/engine/engine_api.py | 6 +- pyanomaly/core/engine/functions/amc.py | 4 +- pyanomaly/core/engine/functions/ma.py | 4 +- pyanomaly/core/hook/abstract/abstract_hook.py | 4 - pyanomaly/core/hook/functions/ocae_hooks.py | 183 +++++++++--------- pyanomaly/core/optimizer/optimizer_api.py | 10 +- pyanomaly/core/other/kmeans.py | 13 +- pyanomaly/core/scheduler/schedulers.py | 2 +- pyanomaly/core/utils.py | 60 +++--- .../abstract/abstract_datasets_factory.py | 5 +- pyanomaly/datatools/abstract/readers.py | 25 ++- pyanomaly/datatools/abstract/video_dataset.py | 4 +- .../datatools/dataclass/datasets_factory.py | 3 +- .../datatools/dataclass/sampler/common.py | 3 +- pyanomaly/datatools/datatools_api.py | 11 +- pyanomaly/datatools/evaluate/eval_function.py | 6 +- pyanomaly/datatools/evaluate/utils.py | 35 ++-- pyanomaly/loss/functions/basic_loss.py | 35 ++-- .../auxiliary/flownet2/FlowNetFusion.py | 8 +- .../correlation_package/correlation.py | 11 +- .../networks/auxiliary/flownet2/models.py | 38 ++-- .../networks/auxiliary/liteflownet/models.py | 14 +- pyanomaly/networks/auxiliary/pose/models.py | 5 +- pyanomaly/networks/meta/amc_networks.py | 4 +- pyanomaly/networks/meta/anopcn_networks.py | 9 +- pyanomaly/networks/meta/base/commonness.py | 9 +- pyanomaly/networks/meta/base/prednet.py | 9 +- pyanomaly/networks/meta/ma_networks.py | 4 +- pyanomaly/networks/meta/memae_networks.py | 3 +- pyanomaly/networks/meta/pcn_parts/pcm.py | 9 +- pyanomaly/networks/meta/pcn_parts/prednet.py | 9 +- pyanomaly/networks/model_api.py | 39 ++-- pyanomaly/utils/system.py | 3 +- pyanomaly/utils/tools.py | 107 +++++----- 37 files changed, 319 insertions(+), 404 deletions(-) diff --git a/demo.py b/demo.py index 842804b..eb5d366 100644 --- a/demo.py +++ b/demo.py @@ -71,9 +71,7 @@ def main(args, cfg, logger, cfg_name, time_stamp, is_training): logger.info('Finish Using the anomaly detection service') def make_result(result_dict, video_path): - result_image = None - - return result_image + return None if __name__ == '__main__': args = parse_args() diff --git a/pyanomaly/core/engine/abstract/abstract_engine.py b/pyanomaly/core/engine/abstract/abstract_engine.py index 6c4dc67..18f8fe1 100644 --- a/pyanomaly/core/engine/abstract/abstract_engine.py +++ b/pyanomaly/core/engine/abstract/abstract_engine.py @@ -84,8 +84,7 @@ def data_parallel(self, model): """ logger.info(' ==> Data Parallel') gpus = [int(i) for i in self.engine_gpus] - model_parallel = torch.nn.DataParallel(model.cuda(), device_ids=gpus) - return model_parallel + return torch.nn.DataParallel(model.cuda(), device_ids=gpus) def _load_file(self, model_keys, model_file): diff --git a/pyanomaly/core/engine/abstract/base_engine.py b/pyanomaly/core/engine/abstract/base_engine.py index af45b31..2d6c762 100644 --- a/pyanomaly/core/engine/abstract/base_engine.py +++ b/pyanomaly/core/engine/abstract/base_engine.py @@ -200,16 +200,15 @@ def fine_tune(self): for n, p in self.model.named_parameters(): parts = n.split('.') # consider the data parallel situation - if parts[0] == 'module': - if parts[1] not in layer_list: - p.requires_grad = False - if p.requires_grad: - print(n) - else: - if parts[0] not in layer_list: - p.requires_grad = False - if p.requires_grad: - print(n) + 
if ( + parts[0] == 'module' + and parts[1] not in layer_list + or parts[0] != 'module' + and parts[0] not in layer_list + ): + p.requires_grad = False + if p.requires_grad: + print(n) self.logger.info('Finish Setting freeze layers') def data_parallel(self, model): @@ -222,8 +221,7 @@ def data_parallel(self, model): """ logger.info(' ==> Data Parallel') gpus = [int(i) for i in self.config.SYSTEM.gpus] - model_parallel = torch.nn.DataParallel(model.cuda(), device_ids=gpus) - return model_parallel + return torch.nn.DataParallel(model.cuda(), device_ids=gpus) def after_step(self, current_step): diff --git a/pyanomaly/core/engine/engine_api.py b/pyanomaly/core/engine/engine_api.py index 4bcc66a..a11ccd2 100644 --- a/pyanomaly/core/engine/engine_api.py +++ b/pyanomaly/core/engine/engine_api.py @@ -23,11 +23,7 @@ def __init__(self, cfg, is_training): self.cfg = cfg self.model_name = self.cfg.MODEL.name self.is_training = is_training - if self.is_training: - self.phase = 'TRAIN' - else: - self.phase = 'VAL' - + self.phase = 'TRAIN' if self.is_training else 'VAL' self.engine_name = self.cfg.get(self.phase)['engine_name'] def build(self): diff --git a/pyanomaly/core/engine/functions/amc.py b/pyanomaly/core/engine/functions/amc.py index a0bb272..81da012 100644 --- a/pyanomaly/core/engine/functions/amc.py +++ b/pyanomaly/core/engine/functions/amc.py @@ -163,13 +163,11 @@ def custom_setup(self): self.wf = 1.0 self.wi = 1.0 self.threshold = 0.0 # the threshold to judge whether the frame is the anomaly - pass def get_clip_by_stride(self, video, stride=2): """Get the clip list by the stride """ - clip_list = [] - return clip_list + return [] def execute(self, data): output_dict = OrderedDict() diff --git a/pyanomaly/core/engine/functions/ma.py b/pyanomaly/core/engine/functions/ma.py index 73b8f0b..2935a7e 100644 --- a/pyanomaly/core/engine/functions/ma.py +++ b/pyanomaly/core/engine/functions/ma.py @@ -172,13 +172,11 @@ def custom_setup(self): self.wf = 1.0 self.wi = 1.0 self.threshold = 0.0 # the threshold to judge whether the frame is the anomaly - pass def get_clip_by_stride(self, video, stride=2): """Get the clip list by the stride """ - clip_list = [] - return clip_list + return [] def execute(self, data): output_dict = OrderedDict() diff --git a/pyanomaly/core/hook/abstract/abstract_hook.py b/pyanomaly/core/hook/abstract/abstract_hook.py index 9a9fdaf..efbc571 100644 --- a/pyanomaly/core/hook/abstract/abstract_hook.py +++ b/pyanomaly/core/hook/abstract/abstract_hook.py @@ -74,10 +74,6 @@ def after_step(self, current_step): # save the checkpoint self.engine.save(current_step) self.engine.logger.info('LOL==>the accuracy is not imporved in epcoh{} but save'.format(current_step)) - else: - pass - else: - pass def inference(self): acc = self.evaluate(0) diff --git a/pyanomaly/core/hook/functions/ocae_hooks.py b/pyanomaly/core/hook/functions/ocae_hooks.py index dc60b6b..4e1cd7b 100644 --- a/pyanomaly/core/hook/functions/ocae_hooks.py +++ b/pyanomaly/core/hook/functions/ocae_hooks.py @@ -37,97 +37,102 @@ class ClusterHook(HookBase): def after_step(self, current_step): # import ipdb; ipdb.set_trace() - if current_step % self.engine.config.TRAIN.eval_step == 0 and current_step!= 0: - self.engine.logger.info('Start clsuter the feature') - frame_num = self.engine.config.DATASET.train.clip_length - frame_step = self.engine.config.DATASET.train.clip_step - feature_record = [] - for video_name in self.engine.cluster_dataset_keys: - dataset = self.engine.cluster_dataset_dict[video_name] - data_loader = 
DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1) - # import ipdb; ipdb.set_trace() - for test_input, anno, meta in data_loader: - future = data[:, :, 2, :, :].cuda() # t+1 frame - current = data[:, :, 1, :, :].cuda() # t frame - past = data[:, :, 0, :, :].cuda() # t frame - bboxs = get_batch_dets(self.engine.Detector, current) - for index, bbox in enumerate(bboxs): - # import ipdb; ipdb.set_trace() - if bbox.numel() == 0: - # import ipdb; ipdb.set_trace() - # bbox = torch.zeros([1,4]) - bbox = bbox.new_zeros([1,4]) - # print('NO objects') - # continue - # import ipdb; ipdb.set_trace() - current_object, _ = multi_obj_grid_crop(current[index], bbox) - future_object, _ = multi_obj_grid_crop(future[index], bbox) - future2current = torch.stack([future_object, current_object], dim=1) - - past_object, _ = multi_obj_grid_crop(past[index], bbox) - current2past = torch.stack([current_object, past_object], dim=1) - - _, _, A_input = frame_gradient(future2current) - A_input = A_input.sum(1) - _, _, C_input = frame_gradient(current2past) - C_input = C_input.sum(1) - A_feature, _, _ = self.engine.A(A_input) - B_feature, _, _ = self.engine.B(current_object) - C_feature, _, _ = self.engine.C(C_input) - - A_flatten_feature = A_feature.flatten(start_dim=1) - B_flatten_feature = B_feature.flatten(start_dim=1) - C_flatten_feature = C_feature.flatten(start_dim=1) - ABC_feature = torch.cat([A_flatten_feature, B_flatten_feature, C_flatten_feature], dim=1).detach() - # import ipdb; ipdb.set_trace() - ABC_feature_s = torch.chunk(ABC_feature, ABC_feature.size(0), dim=0) - # feature_record.extend(ABC_feature_s) - for abc_f in ABC_feature_s: - temp = abc_f.squeeze(0).cpu().numpy() - feature_record.append(temp) - # import ipdb; ipdb.set_trace() - self.engine.logger.info(f'Finish the video:{video_name}') - self.engine.logger.info(f'Finish extract feature, the sample:{len(feature_record)}') - device = torch.device('cuda:0') - cluster_input = torch.from_numpy(np.array(feature_record)) - # cluster_input = np.array(feature_record) - time = mmcv.Timer() - # import ipdb; ipdb.set_trace() - cluster_centers = cluster_input.new_zeros(size=[self.engine.config.TRAIN.cluster.k, 3072]) - cluster_score = 0.0 - cluster_model = None - for _ in range(1): - # model = KMeans(n_clusters=self.trainer.config.TRAIN.cluster.k, init='k-means++',n_init=10, algorithm='full',max_iter=300).fit(cluster_input) - # labels = model.labels_ - # temp = calinski_harabaz_score(cluster_input, labels) - # if temp > cluster_score: - # cluster_model = model - # print(f'the temp score is {temp}') - cluster_ids_x, cluster_center = kmeans(X=cluster_input, num_clusters=self.engine.config.TRAIN.cluster.k, distance='euclidean', device=device) - cluster_centers += cluster_center - # import ipdb; ipdb.set_trace() - # cluster_centers = cluster_centers / 10 - # model.fit(cluster_input) - # pusedo_labels = model.predict(cluster_input) - pusedo_labels = kmeans_predict(cluster_input, cluster_centers, 'euclidean', device=device).detach().cpu().numpy() - # pusedo_labels = cluster_model.labels_ - print(f'The cluster time is :{time.since_start()/60} min') + if ( + current_step % self.engine.config.TRAIN.eval_step != 0 + or current_step == 0 + ): + return + + self.engine.logger.info('Start clsuter the feature') + frame_num = self.engine.config.DATASET.train.clip_length + frame_step = self.engine.config.DATASET.train.clip_step + feature_record = [] + for video_name in self.engine.cluster_dataset_keys: + dataset = 
self.engine.cluster_dataset_dict[video_name]
+            data_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1)
             # import ipdb; ipdb.set_trace()
+            for test_input, anno, meta in data_loader:  # NOTE: the loader yields `test_input`; the original `data` used below was undefined and raised a NameError
+                future = test_input[:, :, 2, :, :].cuda() # t+1 frame
+                current = test_input[:, :, 1, :, :].cuda() # t frame
+                past = test_input[:, :, 0, :, :].cuda() # t-1 frame
+                bboxs = get_batch_dets(self.engine.Detector, current)
+                for index, bbox in enumerate(bboxs):
+                    # import ipdb; ipdb.set_trace()
+                    if bbox.numel() == 0:
+                        # import ipdb; ipdb.set_trace()
+                        # bbox = torch.zeros([1,4])
+                        bbox = bbox.new_zeros([1,4])
+                        # print('NO objects')
+                        # continue
+                    # import ipdb; ipdb.set_trace()
+                    current_object, _ = multi_obj_grid_crop(current[index], bbox)
+                    future_object, _ = multi_obj_grid_crop(future[index], bbox)
+                    future2current = torch.stack([future_object, current_object], dim=1)
+
+                    past_object, _ = multi_obj_grid_crop(past[index], bbox)
+                    current2past = torch.stack([current_object, past_object], dim=1)
+
+                    _, _, A_input = frame_gradient(future2current)
+                    A_input = A_input.sum(1)
+                    _, _, C_input = frame_gradient(current2past)
+                    C_input = C_input.sum(1)
+                    A_feature, _, _ = self.engine.A(A_input)
+                    B_feature, _, _ = self.engine.B(current_object)
+                    C_feature, _, _ = self.engine.C(C_input)
+
+                    A_flatten_feature = A_feature.flatten(start_dim=1)
+                    B_flatten_feature = B_feature.flatten(start_dim=1)
+                    C_flatten_feature = C_feature.flatten(start_dim=1)
+                    ABC_feature = torch.cat([A_flatten_feature, B_flatten_feature, C_flatten_feature], dim=1).detach()
+                    # import ipdb; ipdb.set_trace()
+                    ABC_feature_s = torch.chunk(ABC_feature, ABC_feature.size(0), dim=0)
+                    # feature_record.extend(ABC_feature_s)
+                    for abc_f in ABC_feature_s:
+                        temp = abc_f.squeeze(0).cpu().numpy()
+                        feature_record.append(temp)
+                    # import ipdb; ipdb.set_trace()
+            self.engine.logger.info(f'Finish the video:{video_name}')
+        self.engine.logger.info(f'Finish extract feature, the sample:{len(feature_record)}')
+        device = torch.device('cuda:0')
+        cluster_input = torch.from_numpy(np.array(feature_record))
+        # cluster_input = np.array(feature_record)
+        time = mmcv.Timer()
+        # import ipdb; ipdb.set_trace()
+        cluster_centers = cluster_input.new_zeros(size=[self.engine.config.TRAIN.cluster.k, 3072])
+        cluster_score = 0.0
+        cluster_model = None
+        for _ in range(1):
+            # model = KMeans(n_clusters=self.trainer.config.TRAIN.cluster.k, init='k-means++',n_init=10, algorithm='full',max_iter=300).fit(cluster_input)
+            # labels = model.labels_
+            # temp = calinski_harabaz_score(cluster_input, 
labels) + # if temp > cluster_score: + # cluster_model = model + # print(f'the temp score is {temp}') + cluster_ids_x, cluster_center = kmeans(X=cluster_input, num_clusters=self.engine.config.TRAIN.cluster.k, distance='euclidean', device=device) + cluster_centers += cluster_center + # import ipdb; ipdb.set_trace() + # cluster_centers = cluster_centers / 10 + # model.fit(cluster_input) + # pusedo_labels = model.predict(cluster_input) + pusedo_labels = kmeans_predict(cluster_input, cluster_centers, 'euclidean', device=device).detach().cpu().numpy() + # pusedo_labels = cluster_model.labels_ + print(f'The cluster time is :{time.since_start()/60} min') + # import ipdb; ipdb.set_trace() + # pusedo_labels = np.split(pusedo_labels, pusedo_labels.shape[0], 0) + + pusedo_dataset = os.path.join(self.engine.config.TRAIN.pusedo_data_path, 'pusedo') + if not os.path.exists(pusedo_dataset): + os.mkdir(pusedo_dataset) + + np.savez_compressed(os.path.join(pusedo_dataset, f'{self.engine.config.DATASET.name}_dummy.npz'), data=cluster_input, label=pusedo_labels) + print(f'The save time is {time.since_last_check() / 60} min') + # binary_labels = MultiLabelBinarizer().fit_transform(pusedo_labels) + # self.trainer.ovr_model = OneVsRestClassifier(LinearSVC(random_state = 0)).fit(cluster_input,binary_labels) + # self.trainer.ovr_model = OneVsRestClassifier(LinearSVC(random_state = 0), n_jobs=16).fit(cluster_input, pusedo_labels) + self.engine.ovr_model = self.engine.ovr_model.fit(cluster_input, pusedo_labels) + # self.trainer.saved_model['OVR'] = self.trainer.ovr_model + print(f'The train ovr: {time.since_last_check() / 60} min') + joblib.dump(self.engine.ovr_model, self.engine.ovr_model_path) # import ipdb; ipdb.set_trace() @HOOK_REGISTRY.register() diff --git a/pyanomaly/core/optimizer/optimizer_api.py b/pyanomaly/core/optimizer/optimizer_api.py index f62a375..d6e73b6 100644 --- a/pyanomaly/core/optimizer/optimizer_api.py +++ b/pyanomaly/core/optimizer/optimizer_api.py @@ -46,7 +46,7 @@ def _build_one_optimizer(self, model): return t def _build_multi_optimizers(self, model_list): - param_groups = list() + param_groups = [] if self.type not in OptimizerAPI._SUPPROT: raise Exception(f'Not support: {self.type} in {OptimizerAPI._NAME}') @@ -58,7 +58,7 @@ def _build_multi_optimizers(self, model_list): for model in model_list: param_groups.append({'params':model.parameters()}) t = torch.optim.SGD(model.parameters(), lr=self.lr, momentum=self.params.momentum, weight_decay=self.params.weight_decay,nesterov=self.params.nesterov) - + return t def _build(self, model): @@ -81,9 +81,7 @@ def __call__(self, model): if mode == OptimizerAPI._MODE[0]: optimizer_name = 'optimizer_'+''.join(include_parts) - model_combination = [] - for temp in include_parts: - model_combination.append(model[temp]) + model_combination = [model[temp] for temp in include_parts] optimizer_value = self._build(model_combination) optimizer_dict.update({optimizer_name:optimizer_value}) elif mode == OptimizerAPI._MODE[1]: @@ -94,6 +92,6 @@ def __call__(self, model): optimizer_dict.update({optimizer_name:optimizer_value}) else: raise Exception(f'Not support the optimizer mode, only support {OptimizerAPI._MODE}') - + return optimizer_dict diff --git a/pyanomaly/core/other/kmeans.py b/pyanomaly/core/other/kmeans.py index cfcde3f..9fc328e 100644 --- a/pyanomaly/core/other/kmeans.py +++ b/pyanomaly/core/other/kmeans.py @@ -18,8 +18,7 @@ def initialize(X, num_clusters): """ num_samples = len(X) indices = np.random.choice(num_samples, num_clusters, replace=False) 
- initial_state = X[indices] - return initial_state + return X[indices] def kmeans( @@ -41,10 +40,10 @@ def kmeans( """ print(f'running k-means on {device}..') - if distance == 'euclidean': - pairwise_distance_function = pairwise_distance - elif distance == 'cosine': + if distance == 'cosine': pairwise_distance_function = pairwise_cosine + elif distance == 'euclidean': + pairwise_distance_function = pairwise_distance else: raise NotImplementedError @@ -65,7 +64,7 @@ def kmeans( choice_points = torch.argmin(dis, dim=0) initial_state = X[choice_points] initial_state = initial_state.to(device) - + iteration = 0 # tqdm_meter = tqdm(desc='[running kmeans]') while True: @@ -89,7 +88,7 @@ def kmeans( )) # increment iteration - iteration = iteration + 1 + iteration += 1 # update tqdm meter # tqdm_meter.set_postfix( diff --git a/pyanomaly/core/scheduler/schedulers.py b/pyanomaly/core/scheduler/schedulers.py index bc49061..1d40a58 100644 --- a/pyanomaly/core/scheduler/schedulers.py +++ b/pyanomaly/core/scheduler/schedulers.py @@ -26,7 +26,7 @@ def __init__( warmup_method: str = "linear", last_epoch: int = -1, ): - if not list(milestones) == sorted(milestones): + if list(milestones) != sorted(milestones): raise ValueError( "Milestones should be a list of" " increasing integers. Got {}", milestones ) diff --git a/pyanomaly/core/utils.py b/pyanomaly/core/utils.py index d1add5e..f3d488b 100644 --- a/pyanomaly/core/utils.py +++ b/pyanomaly/core/utils.py @@ -216,13 +216,13 @@ def frame_gradient(x): ''' video_length = x.size(1) dx = list() - dy = list() + dy = [] for i in range(video_length): temp_dx, temp_dy = image_gradient(x[:,i,:,:,:]) # dx.append(temp_dx.unsqueeze_(1)) dx.append(temp_dx) dy.append(temp_dy) - + dx = torch.stack(dx, dim=1) dy = torch.stack(dy, dim=1) # import ipdb; ipdb.set_trace() @@ -232,7 +232,7 @@ def frame_gradient(x): def vis_optical_flow(batch_optical, output_format, output_size, normalize): temp = batch_optical.detach().cpu().permute(0,2,3,1).numpy() - temp_list = list() + temp_list = [] for i in range(temp.shape[0]): np_image = flow2img(temp[i], output_format) temp_image = torch.from_numpy(np_image.transpose((2, 0, 1))) @@ -350,31 +350,33 @@ def verse_normalize(image_tensor, mean, std, video=False): def get_batch_dets(det_model, batch_image): - """ + """ Use the detecron2 """ - image_list = list() - batch_size = batch_image.size(0) - images = torch.chunk(batch_image, batch_size, dim=0) - for image in images: - image_list.append({"image":image.squeeze_(0).mul(255).byte()[[2,0,1],:,:]}) - outputs = det_model(image_list) - - bboxs = [] - frame_objects = OrderedDict() - max_objects = 0 - min_objects = 1000 - for frame_id, out in enumerate(outputs): - temp = out['instances'].pred_boxes.tensor.detach() - temp.requires_grad = False - frame_objects[frame_id] = temp.size(0) - if frame_objects[frame_id] > max_objects: - max_objects = frame_objects[frame_id] - if frame_objects[frame_id] < min_objects: - min_objects = frame_objects[frame_id] - bboxs.append(temp) - - return bboxs + batch_size = batch_image.size(0) + images = torch.chunk(batch_image, batch_size, dim=0) + image_list = [ + {"image": image.squeeze_(0).mul(255).byte()[[2, 0, 1], :, :]} + for image in images + ] + + outputs = det_model(image_list) + + bboxs = [] + frame_objects = OrderedDict() + max_objects = 0 + min_objects = 1000 + for frame_id, out in enumerate(outputs): + temp = out['instances'].pred_boxes.tensor.detach() + temp.requires_grad = False + frame_objects[frame_id] = temp.size(0) + if frame_objects[frame_id] > 
max_objects: + max_objects = frame_objects[frame_id] + if frame_objects[frame_id] < min_objects: + min_objects = frame_objects[frame_id] + bboxs.append(temp) + + return bboxs def save_score_results(score, cfg, logger, verbose=None, config_name='None', current_step=0, time_stamp='time_step'): """Save scores. @@ -448,14 +450,12 @@ def make_info_message(current_step, max_step, model_type, batch_time, batch_size loss_string += f'{loss_name}:{loss_val:.5f}({loss_avg:.5f})' if index != (len(loss_list) -1): loss_string += '\t' - - msg = f'Step: [{current_step}/{max_step}]\t' \ + + return f'Step: [{current_step}/{max_step}]\t' \ f'Type: {model_type}\t' \ f'Time: {batch_time.val:.2f}s ({batch_time.avg:.2f}s)\t' \ f'Speed: {speed:.1f} samples/s\t' \ f'Data: {data_time.val:.2f}s ({data_time.avg:.2f}s)\t' + loss_string - - return msg if __name__ == '__main__': diff --git a/pyanomaly/datatools/abstract/abstract_datasets_factory.py b/pyanomaly/datatools/abstract/abstract_datasets_factory.py index 2d66710..89ca775 100644 --- a/pyanomaly/datatools/abstract/abstract_datasets_factory.py +++ b/pyanomaly/datatools/abstract/abstract_datasets_factory.py @@ -16,10 +16,7 @@ def __init__(self, cfg, aug, is_training=True) -> None: self.aug = aug self.is_training = is_training - if self.is_training: - self.phase = 'train' - else: - self.phase = 'val' + self.phase = 'train' if self.is_training else 'val' @abc.abstractmethod def _produce_train_dataset(self): diff --git a/pyanomaly/datatools/abstract/readers.py b/pyanomaly/datatools/abstract/readers.py index b95a002..601df4b 100644 --- a/pyanomaly/datatools/abstract/readers.py +++ b/pyanomaly/datatools/abstract/readers.py @@ -145,8 +145,8 @@ def read(self, frames_list, start, end, clip_length=2, step=1, array_type='tenso ''' array_type: the output format of the video array. 
The shape of the video data is [C,D,H,W] ''' - clip_list = list() - clip_list_original = list() + clip_list = [] + clip_list_original = [] for frame_id in range(start, end, step): # import ipdb; ipdb.set_trace() frame_name = frames_list[frame_id] @@ -154,7 +154,7 @@ def read(self, frames_list, start, end, clip_length=2, step=1, array_type='tenso original_frame = original_frame.numpy() clip_list.append(frame) clip_list_original.append(original_frame) - + # Make the clip have the same length, method1: supplement the frames if len(clip_list) < clip_length: diff = clip_length - len(clip_list) @@ -168,14 +168,14 @@ def read(self, frames_list, start, end, clip_length=2, step=1, array_type='tenso # ================================= clip_np = np.array(clip_list) # the shape of the clip_np is [D,H,W,C] clip_original = self._normalize_original(torch.from_numpy(np.array(clip_list_original))) # the shape of the clip_original is [C, D, H, W] - + assert clip_np.shape[0] == clip_length, f'The clip length is {clip_length}, the real one is {clip_np[0]}' - + # Use the data augment for the video if self.transforms is not None: # type of clip is ndarray clip = self._augment(clip_np) - + if array_type == 'ndarray': if isinstance(clip, np.ndarray): return clip, clip_original @@ -204,7 +204,7 @@ def read(self, frames_list, start, end, clip_length=2, step=1, array_type='tenso raise Exception('Some error in videoloader line 134') else: raise Exception(f'Get the wrong type {array_type}') - + clip = clip.permute(1,0,2,3) return clip, clip_original # the batch shape is [N,C,D,H,W], because the Pytorch conv3D is [N,C,D,H,W] @@ -329,9 +329,7 @@ def get_video_length(sub_video_number): video_name = os.path.join(dataset_video_folder, video_list[sub_video_number]) assert os.path.isdir(video_name), f'{video_name} is not directory!' 
- length = len(os.listdir(video_name)) - - return length + return len(os.listdir(video_name)) gt = [] for i in range(number_videos): @@ -359,8 +357,7 @@ def get_video_length(sub_video_number): def _load_shanghai_gt(self): video_path_list = sorted(os.listdir(self.gt_path)) - gt = [] - for video in video_path_list: - gt.append(np.load(os.path.join(self.gt_path, video))) - return gt + return [ + np.load(os.path.join(self.gt_path, video)) for video in video_path_list + ] diff --git a/pyanomaly/datatools/abstract/video_dataset.py b/pyanomaly/datatools/abstract/video_dataset.py index 07151ed..28f2a90 100644 --- a/pyanomaly/datatools/abstract/video_dataset.py +++ b/pyanomaly/datatools/abstract/video_dataset.py @@ -73,7 +73,6 @@ def abstract_setup(self): self.videos[video_name]['length'] = len(self.videos[video_name]['frames']) self.videos[video_name]['cursor'] = 0 self.total_clips += (len(self.videos[video_name]['frames']) - self.clip_length) - self.videos_keys = self.videos.keys() else: self.total_clips_onevideo = 0 # the dir is the path of one video @@ -87,7 +86,8 @@ def abstract_setup(self): self.videos[video_name]['cursor'] = 0 self.total_clips_onevideo += (len(self.videos[video_name]['frames']) - self.clip_length) self.pics_len = len(self.videos[video_name]['frames']) - self.videos_keys = self.videos.keys() + + self.videos_keys = self.videos.keys() def __getitem__(self, indice): raise Exception(f'No inplement at {AbstractVideoDataset._NAME}') diff --git a/pyanomaly/datatools/dataclass/datasets_factory.py b/pyanomaly/datatools/dataclass/datasets_factory.py index ddb4a93..e26f8d8 100644 --- a/pyanomaly/datatools/dataclass/datasets_factory.py +++ b/pyanomaly/datatools/dataclass/datasets_factory.py @@ -232,5 +232,4 @@ def __call__(self): Returns: dataset_dict(OrederedDict): As the return of self._build() """ - dataset_dict = self._build() - return dataset_dict + return self._build() diff --git a/pyanomaly/datatools/dataclass/sampler/common.py b/pyanomaly/datatools/dataclass/sampler/common.py index 0a7624a..ce8b58a 100644 --- a/pyanomaly/datatools/dataclass/sampler/common.py +++ b/pyanomaly/datatools/dataclass/sampler/common.py @@ -105,8 +105,7 @@ def _serialize_to_tensor(data, group): ) ) storage = torch.ByteStorage.from_buffer(buffer) - tensor = torch.ByteTensor(storage).to(device=device) - return tensor + return torch.ByteTensor(storage).to(device=device) def _pad_to_largest_tensor(tensor, group): diff --git a/pyanomaly/datatools/datatools_api.py b/pyanomaly/datatools/datatools_api.py index e98720c..bc871aa 100644 --- a/pyanomaly/datatools/datatools_api.py +++ b/pyanomaly/datatools/datatools_api.py @@ -116,8 +116,7 @@ def _build_dataset(self): Returns: dataset(OrderedDict) """ - dataset = self.factory() - return dataset + return self.factory() def _build_sampler(self, _data_len): """ @@ -128,10 +127,9 @@ def _build_sampler(self, _data_len): sampler: torch.data.Sampler """ if self.cfg.SYSTEM.distributed.use: - sampler = DistTrainSampler(_data_len) + return DistTrainSampler(_data_len) else: - sampler = TrainSampler(_data_len, self.seed) - return sampler + return TrainSampler(_data_len, self.seed) def __call__(self): """ @@ -141,8 +139,7 @@ def __call__(self): Returns: None """ - dataloader_dict = self.build() - return dataloader_dict + return self.build() class EvaluateAPI(object): diff --git a/pyanomaly/datatools/evaluate/eval_function.py b/pyanomaly/datatools/evaluate/eval_function.py index 0530be5..ae63057 100644 --- a/pyanomaly/datatools/evaluate/eval_function.py +++ 
b/pyanomaly/datatools/evaluate/eval_function.py @@ -33,11 +33,7 @@ def __init__(self, cfg, is_training) -> None: self.optimal_resulst = RecordResult() self.decidable_idx = self.dataset_params.decidable_idx self.decidable_idx_back = self.dataset_params.decidable_idx - if is_training: - self.parts = ['train', 'val'] - else: - self.parts = ['val'] - + self.parts = ['train', 'val'] if is_training else ['val'] if self.dataset_params.score_type == 'normal': self.pos_label = 0 elif self.dataset_params.score_type == 'abnormal': diff --git a/pyanomaly/datatools/evaluate/utils.py b/pyanomaly/datatools/evaluate/utils.py index 89b42de..1264c3d 100644 --- a/pyanomaly/datatools/evaluate/utils.py +++ b/pyanomaly/datatools/evaluate/utils.py @@ -31,7 +31,7 @@ def load_pickle_results(loss_file, cfg): # psnr_records = results['psnr'] # score_records = results['score'] score_records = list() - psnr_records = list() + psnr_records = [] num_videos = results['num_videos'] # import ipdb; ipdb.set_trace() if cfg.DATASET.smooth.guassian: @@ -85,15 +85,12 @@ def psnr_error(gen_frames, gt_frames, hat=False): for i in range(batch_num): num_pixels = gen_frames[i].numel() # max_val_hat = gen_frames[i].max() - if hat: - max_val = gen_frames[i].max() - else: - max_val = gt_frames[i].max() + max_val = gen_frames[i].max() if hat else gt_frames[i].max() square_diff = (gt_frames[i] - gen_frames[i])**2 log_value = torch.log10(max_val ** 2 / ((1. / num_pixels) * torch.sum(square_diff))) image_errors = 10 * log_value batch_errors += image_errors - + batch_errors = torch.div(batch_errors, batch_num) return batch_errors @@ -213,9 +210,7 @@ def calc_w(w_dict): return wf, wi def amc_normal_score(wf, sf, wi, si, lambada_s=0.2): - final_score = torch.log(wf * sf) + lambada_s * torch.log(wi*si) - - return final_score + return torch.log(wf * sf) + lambada_s * torch.log(wi*si) def amc_score(frame, frame_hat, flow, flow_hat, wf, wi, kernel_size=16, stride=4, lambada_s=0.2): ''' @@ -233,10 +228,9 @@ def oc_score(raw_data): # temp = np.max(-dummy_objects) temp = -np.max(dummy_objects) object_score[index] = temp - - frame_score = np.max(object_score) + # import ipdb; ipdb.set_trace() - return frame_score + return np.max(object_score) def reconstruction_loss(x_hat, x): @@ -306,8 +300,7 @@ def precision_recall_auc(loss_file, cfg): results = RecordResult(recall, precision, thresholds, auc, dataset, sub_loss_file) - if optimal_results < results: - optimal_results = results + optimal_results = max(optimal_results, results) if os.path.isdir(loss_file): print(results) @@ -336,8 +329,7 @@ def compute_eer(loss_file, cfg): results = RecordResult(fpr, tpr, thresholds, eer, dataset, sub_loss_file) - if optimal_results > results: - optimal_results = results + optimal_results = min(optimal_results, results) if os.path.isdir(loss_file): print(results) @@ -411,7 +403,7 @@ def compute_auc_psnr(loss_file, logger, cfg, score_type='normal'): scores = np.array([], dtype=np.float32) labels = np.array([], dtype=np.int8) - + # video normalization for i in range(num_videos): distance = psnr_records[i] @@ -430,8 +422,7 @@ def compute_auc_psnr(loss_file, logger, cfg, score_type='normal'): results = RecordResult(fpr, tpr, thresholds, auc, dataset, sub_loss_file) - if optimal_results < results: - optimal_results = results + optimal_results = max(optimal_results, results) if os.path.isdir(loss_file): print(results) @@ -457,12 +448,10 @@ def get_results(score_record, sigma, pos_label): score_one_video = np.clip(score_one_video, 0, None) scores = np.concatenate((scores, 
score_one_video[DECIDABLE_IDX:l-DECIDABLE_IDX_BACK]), axis=0) labels = np.concatenate((labels, gt[i][DECIDABLE_IDX:l-DECIDABLE_IDX_BACK]), axis=0) - + fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=pos_label) auc = metrics.auc(fpr, tpr) - results = RecordResult(fpr, tpr, thresholds, auc, dataset, sub_loss_file, sigma) - - return results + return RecordResult(fpr, tpr, thresholds, auc, dataset, sub_loss_file, sigma) if score_type == 'normal': pos_label = 0 diff --git a/pyanomaly/loss/functions/basic_loss.py b/pyanomaly/loss/functions/basic_loss.py index 3e0a88c..4ad1f05 100644 --- a/pyanomaly/loss/functions/basic_loss.py +++ b/pyanomaly/loss/functions/basic_loss.py @@ -28,14 +28,13 @@ def get_loss_args(loss_cfg): >>> loss_args >>> namedtuple(size_average=None, reduce=None, reduction='mean') """ - args_name = list() - args_value = list() + args_name = [] + args_value = [] for config in loss_cfg: args_name.append(config[0]) args_value.append(config[1]) loss_args_template = namedtuple('LossArgs', args_name) - loss_args = loss_args_template._make(args_value) - return loss_args + return loss_args_template._make(args_value) def pad_same(in_dim, ks, stride, dilation=1): """ @@ -81,9 +80,8 @@ def __init__(self, loss_cfg=None): super(IntensityLoss, self).__init__() self.l_num =2 def forward(self, gen_frames, gt_frames): - x = torch.mean(torch.pow(torch.abs(gen_frames - gt_frames), self.l_num)) # x = torch.mean(torch.abs(gen_frames - gt_frames)**self.l_num) - return x + return torch.mean(torch.pow(torch.abs(gen_frames - gt_frames), self.l_num)) @LOSS_REGISTRY.register() class GradientLoss(nn.Module): @@ -137,8 +135,7 @@ def __init__(self, loss_cfg): self.t1 = nn.BCELoss() def forward(self, outputs, labels): - loss = self.t1(outputs, labels) - return loss + return self.t1(outputs, labels) @LOSS_REGISTRY.register() class AMCGenerateLoss(nn.Module): @@ -146,8 +143,7 @@ def __init__(self, loss_cfg): super(AMCGenerateLoss, self).__init__() self.t1 = nn.BCELoss() def forward(self, fake_outputs, fake): - loss = self.t1(fake_outputs, fake) - return loss + return self.t1(fake_outputs, fake) @LOSS_REGISTRY.register() class GANLoss(nn.Module): @@ -194,10 +190,7 @@ def get_target_tensor(self, prediction, target_is_real): A label tensor filled with ground truth label, and with the size of the input """ - if target_is_real: - target_tensor = self.real_label - else: - target_tensor = self.fake_label + target_tensor = self.real_label if target_is_real else self.fake_label return target_tensor.expand_as(prediction) def __call__(self, prediction, target_is_real): @@ -214,10 +207,7 @@ def __call__(self, prediction, target_is_real): target_tensor = self.get_target_tensor(prediction, target_is_real) loss = self.loss(prediction, target_tensor) elif self.gan_mode == 'wgangp': - if target_is_real: - loss = -prediction.mean() - else: - loss = prediction.mean() + loss = -prediction.mean() if target_is_real else prediction.mean() return loss @LOSS_REGISTRY.register() @@ -227,15 +217,13 @@ def __init__(self): # pass def forward(self, x, target): - error = 0 pred_len = target.shape[2] weight = [i * 1.0 for i in range(pred_len, 0, -1)] weighted_error = [torch.mean(torch.pow(x[:,:,i,:,:] - target[:,:,i,:,:], 2)) * weight[i] for i in range(pred_len)] - for item in weighted_error: - error += item + error = sum(weighted_error) # error /= pred_len ** 2 # import ipdb; ipdb.set_trace() - + error /= pred_len ** 2 return error @@ -282,7 +270,6 @@ def __init__(self): # self.l_num =2 def forward(self, att_weights): 
att_weights = att_weights + (att_weights == 0).float() * 1.0 - x = torch.mean(-att_weights * att_weights.log()) # import ipdb; ipdb.set_trace() # print(f'the memae loss is{x}') - return x + return torch.mean(-att_weights * att_weights.log()) diff --git a/pyanomaly/networks/auxiliary/flownet2/FlowNetFusion.py b/pyanomaly/networks/auxiliary/flownet2/FlowNetFusion.py index 7d80da3..4b8b821 100755 --- a/pyanomaly/networks/auxiliary/flownet2/FlowNetFusion.py +++ b/pyanomaly/networks/auxiliary/flownet2/FlowNetFusion.py @@ -52,16 +52,14 @@ def forward(self, x): flow2 = self.predict_flow2(out_conv2) flow2_up = self.upsampled_flow2_to_1(flow2) out_deconv1 = self.deconv1(out_conv2) - + concat1 = torch.cat((out_conv1,out_deconv1,flow2_up),1) out_interconv1 = self.inter_conv1(concat1) flow1 = self.predict_flow1(out_interconv1) flow1_up = self.upsampled_flow1_to_0(flow1) out_deconv0 = self.deconv0(concat1) - + concat0 = torch.cat((out_conv0,out_deconv0,flow1_up),1) out_interconv0 = self.inter_conv0(concat0) - flow0 = self.predict_flow0(out_interconv0) - - return flow0 + return self.predict_flow0(out_interconv0) diff --git a/pyanomaly/networks/auxiliary/flownet2/correlation_package/correlation.py b/pyanomaly/networks/auxiliary/flownet2/correlation_package/correlation.py index 80a8b09..561bb99 100755 --- a/pyanomaly/networks/auxiliary/flownet2/correlation_package/correlation.py +++ b/pyanomaly/networks/auxiliary/flownet2/correlation_package/correlation.py @@ -56,7 +56,12 @@ def __init__(self, pad_size=0, kernel_size=0, max_displacement=0, stride1=1, str def forward(self, input1, input2): - result = CorrelationFunction(self.pad_size, self.kernel_size, self.max_displacement,self.stride1, self.stride2, self.corr_multiply)(input1, input2) - - return result + return CorrelationFunction( + self.pad_size, + self.kernel_size, + self.max_displacement, + self.stride1, + self.stride2, + self.corr_multiply, + )(input1, input2) diff --git a/pyanomaly/networks/auxiliary/flownet2/models.py b/pyanomaly/networks/auxiliary/flownet2/models.py index 0e86c1b..907ac95 100755 --- a/pyanomaly/networks/auxiliary/flownet2/models.py +++ b/pyanomaly/networks/auxiliary/flownet2/models.py @@ -122,7 +122,7 @@ def init_deconv_bilinear(self, weight): def forward(self, inputs): rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,)) - + x = (inputs - rgb_mean) / self.rgb_max x1 = x[:,:,0,:,:] x2 = x[:,:,1,:,:] @@ -131,15 +131,15 @@ def forward(self, inputs): # flownetc flownetc_flow2 = self.flownetc(x)[0] flownetc_flow = self.upsample1(flownetc_flow2*self.div_flow) - + # warp img1 to img0; magnitude of diff between img0 and and warped_img1, resampled_img1 = self.resample1(x[:,3:,:,:], flownetc_flow) - diff_img0 = x[:,:3,:,:] - resampled_img1 + diff_img0 = x[:,:3,:,:] - resampled_img1 norm_diff_img0 = self.channelnorm(diff_img0) # concat img0, img1, img1->img0, flow, diff-mag ; concat1 = torch.cat((x, resampled_img1, flownetc_flow/self.div_flow, norm_diff_img0), dim=1) - + # flownets1 flownets1_flow2 = self.flownets_1(concat1)[0] flownets1_flow = self.upsample2(flownets1_flow2*self.div_flow) @@ -169,7 +169,7 @@ def forward(self, inputs): flownetsd_flow2 = self.flownets_d(x)[0] flownetsd_flow = self.upsample3(flownetsd_flow2 / self.div_flow) norm_flownetsd_flow = self.channelnorm(flownetsd_flow) - + diff_flownetsd_flow = self.resample3(x[:,3:,:,:], flownetsd_flow) # if not diff_flownetsd_flow.volatile: # diff_flownetsd_flow.register_hook(save_grad(self.args.grads, 'diff_flownetsd_flow')) 
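[Review note, not part of the patch: several forward() hunks in this file repeat one "warp, diff, concat" step before handing off to the next refinement sub-network; the Sourcery edits here only collapse trailing assign-then-return pairs, so the dataflow is unchanged. For readers skimming the diff, the repeated step is, paraphrased with the names this file already defines (resample1, channelnorm, div_flow):

    # warp img1 toward img0 using the current flow estimate
    resampled_img1 = self.resample1(x[:, 3:, :, :], flownetc_flow)
    # brightness residual between img0 and the warped img1
    diff_img0 = x[:, :3, :, :] - resampled_img1
    norm_diff_img0 = self.channelnorm(diff_img0)
    # stack image pair, warped image, scaled flow, and residual magnitude
    # as input for the next network in the cascade
    concat1 = torch.cat((x, resampled_img1, flownetc_flow / self.div_flow, norm_diff_img0), dim=1)
]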
@@ -180,12 +180,10 @@ def forward(self, inputs): # concat img1 flownetsd, flownets2, norm_flownetsd, norm_flownets2, diff_flownetsd_img1, diff_flownets2_img1 concat3 = torch.cat((x[:,:3,:,:], flownetsd_flow, flownets2_flow, norm_flownetsd_flow, norm_flownets2_flow, diff_flownetsd_img1, diff_flownets2_img1), dim=1) - flownetfusion_flow = self.flownetfusion(concat3) - # if not flownetfusion_flow.volatile: # flownetfusion_flow.register_hook(save_grad(self.args.grads, 'flownetfusion_flow')) - return flownetfusion_flow + return self.flownetfusion(concat3) class FlowNet2C(FlowNetC.FlowNetC): def __init__(self, args, batchNorm=False, div_flow=20): @@ -394,7 +392,7 @@ def __init__(self, args, batchNorm=False, div_flow = 20.): def forward(self, inputs): rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,)) - + x = (inputs - rgb_mean) / self.rgb_max x1 = x[:,:,0,:,:] x2 = x[:,:,1,:,:] @@ -403,20 +401,18 @@ def forward(self, inputs): # flownetc flownetc_flow2 = self.flownetc(x)[0] flownetc_flow = self.upsample1(flownetc_flow2*self.div_flow) - + # warp img1 to img0; magnitude of diff between img0 and and warped_img1, resampled_img1 = self.resample1(x[:,3:,:,:], flownetc_flow) - diff_img0 = x[:,:3,:,:] - resampled_img1 + diff_img0 = x[:,:3,:,:] - resampled_img1 norm_diff_img0 = self.channelnorm(diff_img0) # concat img0, img1, img1->img0, flow, diff-mag ; concat1 = torch.cat((x, resampled_img1, flownetc_flow/self.div_flow, norm_diff_img0), dim=1) - + # flownets1 flownets1_flow2 = self.flownets_1(concat1)[0] - flownets1_flow = self.upsample2(flownets1_flow2*self.div_flow) - - return flownets1_flow + return self.upsample2(flownets1_flow2*self.div_flow) class FlowNet2CSS(nn.Module): @@ -471,7 +467,7 @@ def __init__(self, args, batchNorm=False, div_flow = 20.): def forward(self, inputs): rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,)) - + x = (inputs - rgb_mean) / self.rgb_max x1 = x[:,:,0,:,:] x2 = x[:,:,1,:,:] @@ -480,15 +476,15 @@ def forward(self, inputs): # flownetc flownetc_flow2 = self.flownetc(x)[0] flownetc_flow = self.upsample1(flownetc_flow2*self.div_flow) - + # warp img1 to img0; magnitude of diff between img0 and and warped_img1, resampled_img1 = self.resample1(x[:,3:,:,:], flownetc_flow) - diff_img0 = x[:,:3,:,:] - resampled_img1 + diff_img0 = x[:,:3,:,:] - resampled_img1 norm_diff_img0 = self.channelnorm(diff_img0) # concat img0, img1, img1->img0, flow, diff-mag ; concat1 = torch.cat((x, resampled_img1, flownetc_flow/self.div_flow, norm_diff_img0), dim=1) - + # flownets1 flownets1_flow2 = self.flownets_1(concat1)[0] flownets1_flow = self.upsample2(flownets1_flow2*self.div_flow) @@ -503,7 +499,5 @@ def forward(self, inputs): # flownets2 flownets2_flow2 = self.flownets_2(concat2)[0] - flownets2_flow = self.upsample3(flownets2_flow2 * self.div_flow) - - return flownets2_flow + return self.upsample3(flownets2_flow2 * self.div_flow) diff --git a/pyanomaly/networks/auxiliary/liteflownet/models.py b/pyanomaly/networks/auxiliary/liteflownet/models.py index 41f0d8b..ce732c7 100644 --- a/pyanomaly/networks/auxiliary/liteflownet/models.py +++ b/pyanomaly/networks/auxiliary/liteflownet/models.py @@ -125,7 +125,7 @@ def __init__(self, intLevel): if intLevel != 2: self.netFeat = torch.nn.Sequential() - elif intLevel == 2: + else: self.netFeat = torch.nn.Sequential( torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=1, stride=1, padding=0), torch.nn.LeakyReLU(inplace=False, 
negative_slope=0.1) @@ -136,7 +136,7 @@ def __init__(self, intLevel): if intLevel == 6: self.netUpflow = None - elif intLevel != 6: + else: self.netUpflow = torch.nn.ConvTranspose2d(in_channels=2, out_channels=2, kernel_size=4, stride=2, padding=1, bias=False, groups=2) # end @@ -144,7 +144,7 @@ def __init__(self, intLevel): if intLevel >= 4: self.netUpcorr = None - elif intLevel < 4: + else: self.netUpcorr = torch.nn.ConvTranspose2d(in_channels=49, out_channels=49, kernel_size=4, stride=2, padding=1, bias=False, groups=49) # end @@ -175,7 +175,7 @@ def forward(self, tenFirst, tenSecond, tenFeaturesFirst, tenFeaturesSecond, tenF if self.netUpcorr is None: tenCorrelation = torch.nn.functional.leaky_relu(input=correlation.FunctionCorrelation(tenFirst=tenFeaturesFirst, tenSecond=tenFeaturesSecond, intStride=1), negative_slope=0.1, inplace=False) - elif self.netUpcorr is not None: + else: tenCorrelation = self.netUpcorr(torch.nn.functional.leaky_relu(input=correlation.FunctionCorrelation(tenFirst=tenFeaturesFirst, tenSecond=tenFeaturesSecond, intStride=2), negative_slope=0.1, inplace=False)) # end @@ -193,7 +193,7 @@ def __init__(self, intLevel): if intLevel != 2: self.netFeat = torch.nn.Sequential() - elif intLevel == 2: + else: self.netFeat = torch.nn.Sequential( torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=1, stride=1, padding=0), torch.nn.LeakyReLU(inplace=False, negative_slope=0.1) @@ -235,7 +235,7 @@ def __init__(self, intLevel): if intLevel >= 5: self.netFeat = torch.nn.Sequential() - elif intLevel < 5: + else: self.netFeat = torch.nn.Sequential( torch.nn.Conv2d(in_channels=[ 0, 0, 32, 64, 96, 128, 192 ][intLevel], out_channels=128, kernel_size=1, stride=1, padding=0), torch.nn.LeakyReLU(inplace=False, negative_slope=0.1) @@ -263,7 +263,7 @@ def __init__(self, intLevel): torch.nn.Conv2d(in_channels=32, out_channels=[ 0, 0, 49, 25, 25, 9, 9 ][intLevel], kernel_size=[ 0, 0, 7, 5, 5, 3, 3 ][intLevel], stride=1, padding=[ 0, 0, 3, 2, 2, 1, 1 ][intLevel]) ) - elif intLevel < 5: + else: self.netDist = torch.nn.Sequential( torch.nn.Conv2d(in_channels=32, out_channels=[ 0, 0, 49, 25, 25, 9, 9 ][intLevel], kernel_size=([ 0, 0, 7, 5, 5, 3, 3 ][intLevel], 1), stride=1, padding=([ 0, 0, 3, 2, 2, 1, 1 ][intLevel], 0)), torch.nn.Conv2d(in_channels=[ 0, 0, 49, 25, 25, 9, 9 ][intLevel], out_channels=[ 0, 0, 49, 25, 25, 9, 9 ][intLevel], kernel_size=(1, [ 0, 0, 7, 5, 5, 3, 3 ][intLevel]), stride=1, padding=(0, [ 0, 0, 3, 2, 2, 1, 1 ][intLevel])) diff --git a/pyanomaly/networks/auxiliary/pose/models.py b/pyanomaly/networks/auxiliary/pose/models.py index f2861e1..1b00153 100644 --- a/pyanomaly/networks/auxiliary/pose/models.py +++ b/pyanomaly/networks/auxiliary/pose/models.py @@ -237,10 +237,9 @@ def _make_layer(self, block, planes, blocks, stride=1): nn.BatchNorm2d(planes * block.expansion, momentum=self.BN_MOMENTUM), ) - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample)) + layers = [block(self.inplanes, planes, stride, downsample)] self.inplanes = planes * block.expansion - for i in range(1, blocks): + for _ in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) diff --git a/pyanomaly/networks/meta/amc_networks.py b/pyanomaly/networks/meta/amc_networks.py index 84077d1..f91dd33 100644 --- a/pyanomaly/networks/meta/amc_networks.py +++ b/pyanomaly/networks/meta/amc_networks.py @@ -104,6 +104,4 @@ def forward(self, x): x = F.leaky_relu_(x) x = self.conv4(x) x = self.bn4(x) - x_sigmod = F.sigmoid(x) - - return x_sigmod + 
return F.sigmoid(x) diff --git a/pyanomaly/networks/meta/anopcn_networks.py b/pyanomaly/networks/meta/anopcn_networks.py index 106001b..f9aa0a0 100644 --- a/pyanomaly/networks/meta/anopcn_networks.py +++ b/pyanomaly/networks/meta/anopcn_networks.py @@ -78,7 +78,7 @@ def __init__(self, input_channels, hidden_channels, kernel_size): self._all_layers.append(cell) # for each sequence, we need to clear the internal_state - self.internal_state = list() + self.internal_state = [] # @torchsnooper.snoop() def forward(self, input, step): @@ -160,10 +160,7 @@ def forward(self, video_clip): for time_stamp in range(len_video): # print(time_stamp) frame = frames[time_stamp].squeeze(2) - if time_stamp == 0: - E = torch.zeros_like(frame) - else: - E = torch.sub(frame, temp) + E = torch.zeros_like(frame) if time_stamp == 0 else torch.sub(frame, temp) R = self.pep(E) x, _ = self.convlstm(R, time_stamp) Ihat = self.fr(x) @@ -172,7 +169,7 @@ def forward(self, video_clip): temp = Ihat if time_stamp == len_video-1: # 最后一个 result = Ihat - + return result diff --git a/pyanomaly/networks/meta/base/commonness.py b/pyanomaly/networks/meta/base/commonness.py index e1a7cb3..43df3bd 100644 --- a/pyanomaly/networks/meta/base/commonness.py +++ b/pyanomaly/networks/meta/base/commonness.py @@ -287,17 +287,16 @@ def __init__(self, cfg, use_norm=False,norm_layer=nn.BatchNorm2d): self.net=[] self.net.append(nn.Conv2d(input_nc,num_filters[0],kernel_size=4,padding=2,stride=2)) self.net.append(nn.LeakyReLU(0.1)) - if use_norm: - for i in range(1,len(num_filters)-1): + for i in range(1,len(num_filters)-1): + if use_norm: self.net.extend([nn.Conv2d(num_filters[i-1],num_filters[i],4,2,2,bias=use_bias), nn.LeakyReLU(0.1), norm_layer(num_filters[i])]) - else : - for i in range(1,len(num_filters)-1): + else: self.net.extend([nn.Conv2d(num_filters[i-1],num_filters[i],4,2,2,bias=use_bias), nn.LeakyReLU(0.1)]) self.net.append(nn.Conv2d(num_filters[-1],1,4,1,2)) - + self.net = nn.Sequential(*self.net) def forward(self, input): diff --git a/pyanomaly/networks/meta/base/prednet.py b/pyanomaly/networks/meta/base/prednet.py index 13d0950..139d167 100644 --- a/pyanomaly/networks/meta/base/prednet.py +++ b/pyanomaly/networks/meta/base/prednet.py @@ -254,10 +254,7 @@ def get_initial_states(self, input_shape): def isNotTopestLayer(self, layerIndex): '''judge if the layerIndex is not the topest layer.''' - if layerIndex < self.num_layers - 1: - return True - else: - return False + return layerIndex < self.num_layers - 1 def make_layers(self): @@ -507,9 +504,7 @@ def forward(self, A0_withTimeStep, initial_states): return output_list elif self.output_mode == 'prediction': return output_list # 此时的output_list是timestep个预测帧图像 - elif self.output_mode == 'all': - pass - else: + elif self.output_mode != 'all': raise(RuntimeError('Kidding? 
Unknown output mode!')) diff --git a/pyanomaly/networks/meta/ma_networks.py b/pyanomaly/networks/meta/ma_networks.py index 0466d33..7636e40 100644 --- a/pyanomaly/networks/meta/ma_networks.py +++ b/pyanomaly/networks/meta/ma_networks.py @@ -104,9 +104,7 @@ def forward(self, x): x = F.leaky_relu_(x) x = self.conv4(x) x = self.bn4(x) - x_sigmod = F.sigmoid(x) - - return x_sigmod + return F.sigmoid(x) class MA(nn.Module): """ diff --git a/pyanomaly/networks/meta/memae_networks.py b/pyanomaly/networks/meta/memae_networks.py index fc510cb..70a332f 100644 --- a/pyanomaly/networks/meta/memae_networks.py +++ b/pyanomaly/networks/meta/memae_networks.py @@ -32,8 +32,7 @@ def _init_weights(self): nn.init.kaiming_uniform_(self.memory) def hard_shrink_relu(self, input, lambd=0, epsilon=1e-15): - output = (F.relu(input-lambd) * input) / (torch.abs(input - lambd) + epsilon) - return output + return (F.relu(input-lambd) * input) / (torch.abs(input - lambd) + epsilon) # @torchsnooper.snoop() def forward(self, z): diff --git a/pyanomaly/networks/meta/pcn_parts/pcm.py b/pyanomaly/networks/meta/pcn_parts/pcm.py index 0e9fd77..da28600 100644 --- a/pyanomaly/networks/meta/pcn_parts/pcm.py +++ b/pyanomaly/networks/meta/pcn_parts/pcm.py @@ -24,7 +24,7 @@ def __init__(self, input_channels, hidden_channels, kernel_size): self._all_layers.append(cell) # for each sequence, we need to clear the internal_state - self.internal_state = list() + self.internal_state = [] # @torchsnooper.snoop() def forward(self, input, step): @@ -106,10 +106,7 @@ def forward(self, video_clip): for time_stamp in range(len_video): # print(time_stamp) frame = frames[time_stamp].squeeze(2) - if time_stamp == 0: - E = torch.zeros_like(frame) - else: - E = torch.sub(frame, temp) + E = torch.zeros_like(frame) if time_stamp == 0 else torch.sub(frame, temp) R = self.pep(E) x, _ = self.convlstm(R, time_stamp) Ihat = self.fr(x) @@ -118,7 +115,7 @@ def forward(self, video_clip): temp = Ihat if time_stamp == len_video-1: # 最后一个 result = Ihat - + return result diff --git a/pyanomaly/networks/meta/pcn_parts/prednet.py b/pyanomaly/networks/meta/pcn_parts/prednet.py index 13d0950..139d167 100644 --- a/pyanomaly/networks/meta/pcn_parts/prednet.py +++ b/pyanomaly/networks/meta/pcn_parts/prednet.py @@ -254,10 +254,7 @@ def get_initial_states(self, input_shape): def isNotTopestLayer(self, layerIndex): '''judge if the layerIndex is not the topest layer.''' - if layerIndex < self.num_layers - 1: - return True - else: - return False + return layerIndex < self.num_layers - 1 def make_layers(self): @@ -507,9 +504,7 @@ def forward(self, A0_withTimeStep, initial_states): return output_list elif self.output_mode == 'prediction': return output_list # 此时的output_list是timestep个预测帧图像 - elif self.output_mode == 'all': - pass - else: + elif self.output_mode != 'all': raise(RuntimeError('Kidding? Unknown output mode!')) diff --git a/pyanomaly/networks/model_api.py b/pyanomaly/networks/model_api.py index a49b57d..e0f9b79 100644 --- a/pyanomaly/networks/model_api.py +++ b/pyanomaly/networks/model_api.py @@ -36,25 +36,24 @@ def __call__(self): logger.info('The model type is' + f'\033[1;31m {self.cfg.MODEL.type} \033[0m') model_parts = self.cfg.MODEL.parts model_type = self.cfg.MODEL.type - if model_type in ModelAPI.MODEL_TYPE: - model = OrderedDict() - logger.info('Model Dict') - # 2. 
get the model based on the registry - _model_parts = list(model_parts[i:i+2] for i in range(0, len(model_parts), 2)) - for couple in _model_parts: - model_dict_key = couple[0].split('_') - if model_dict_key[0] == 'auxiliary': - model_dict_value = AUX_ARCH_REGISTRY.get(couple[1])(self.cfg) - elif model_dict_key[0] == 'meta': - model_dict_value = META_ARCH_REGISTRY.get(couple[1])(self.cfg) - elif model_dict_key[0] == 'base': - model_dict_value = BASE_ARCH_REGISTRY.get(couple[1])(self.cfg) - else: - raise Exception('Wrong model in line62') - # 3. set the grad requirement --move to the Trainer, for the convenience - # 4. get the final model - model[model_dict_key[1]] = model_dict_value - else: + if model_type not in ModelAPI.MODEL_TYPE: raise Exception(f'Not support Model Type, we only support: {ModelAPI.MODEL_TYPE}') - + + model = OrderedDict() + logger.info('Model Dict') + # 2. get the model based on the registry + _model_parts = [model_parts[i:i+2] for i in range(0, len(model_parts), 2)] + for couple in _model_parts: + model_dict_key = couple[0].split('_') + if model_dict_key[0] == 'auxiliary': + model_dict_value = AUX_ARCH_REGISTRY.get(couple[1])(self.cfg) + elif model_dict_key[0] == 'meta': + model_dict_value = META_ARCH_REGISTRY.get(couple[1])(self.cfg) + elif model_dict_key[0] == 'base': + model_dict_value = BASE_ARCH_REGISTRY.get(couple[1])(self.cfg) + else: + raise Exception('Wrong model in line62') + # 3. set the grad requirement --move to the Trainer, for the convenience + # 4. get the final model + model[model_dict_key[1]] = model_dict_value return model diff --git a/pyanomaly/utils/system.py b/pyanomaly/utils/system.py index d33bb52..1b87325 100644 --- a/pyanomaly/utils/system.py +++ b/pyanomaly/utils/system.py @@ -49,5 +49,4 @@ def parse_args(): parser.add_argument('--flow_model_path', default='/export/home/chengyh/Anomaly_DA/lib/networks/liteFlownet/network-sintel.pytorch') parser.add_argument('opts', help='change the config from the command-line', default=None, nargs=argparse.REMAINDER) - args = parser.parse_args() - return args \ No newline at end of file + return parser.parse_args() \ No newline at end of file diff --git a/pyanomaly/utils/tools.py b/pyanomaly/utils/tools.py index e05f530..9e64273 100644 --- a/pyanomaly/utils/tools.py +++ b/pyanomaly/utils/tools.py @@ -34,7 +34,7 @@ def readFlow(fn): # print 'fn = %s'%(fn) with open(fn, 'rb') as f: magic = np.fromfile(f, np.float32, count=1) - if 202021.25 != magic: + if magic != 202021.25: print('Magic number incorrect. Invalid .flo file') return None else: @@ -53,8 +53,6 @@ def writeFlow(filename,uv,v=None): stacked in depth. Original code by Deqing Sun, adapted from Daniel Scharstein. 
""" - nBands = 2 - if v is None: assert(uv.ndim == 3) assert(uv.shape[2] == 2) @@ -65,17 +63,18 @@ def writeFlow(filename,uv,v=None): assert(u.shape == v.shape) height,width = u.shape - f = open(filename,'wb') - # write the header - f.write(TAG_CHAR) - np.array(width).astype(np.int32).tofile(f) - np.array(height).astype(np.int32).tofile(f) - # arrange into matrix form - tmp = np.zeros((height, width*nBands)) - tmp[:,np.arange(width)*2] = u - tmp[:,np.arange(width)*2 + 1] = v - tmp.astype(np.float32).tofile(f) - f.close() + with open(filename,'wb') as f: + # write the header + f.write(TAG_CHAR) + np.array(width).astype(np.int32).tofile(f) + np.array(height).astype(np.int32).tofile(f) + nBands = 2 + + # arrange into matrix form + tmp = np.zeros((height, width*nBands)) + tmp[:,np.arange(width)*2] = u + tmp[:,np.arange(width)*2 + 1] = v + tmp.astype(np.float32).tofile(f) @@ -90,24 +89,22 @@ def writeFlow(filename,uv,v=None): def flow2img(flow_data, output_format): - ''' + ''' Make the flow to 3 channel ''' - if output_format == 'Y': - img = flow2Y(flow_data) - # if normalize: - # img = img / 255 - elif output_format == 'xym': - mag, _ = cv2.cartToPolar(flow_data[:, :, 0], flow_data[:, :, 1]) - img = np.concatenate((flow_data, np.expand_dims(mag, axis=2)), axis=-1) - elif output_format == 'hsv': - raise Exception('Not finish') - elif output_format == 'rgb': - raise Exception('Not finish') - else: - raise Exception('Not support') - - return img + if output_format == 'Y': + img = flow2Y(flow_data) + # if normalize: + # img = img / 255 + elif output_format == 'xym': + mag, _ = cv2.cartToPolar(flow_data[:, :, 0], flow_data[:, :, 1]) + img = np.concatenate((flow_data, np.expand_dims(mag, axis=2)), axis=-1) + elif output_format in ['hsv', 'rgb']: + raise Exception('Not finish') + else: + raise Exception('Not support') + + return img # ref: https://github.com/sampepose/flownet2-tf/ # blob/18f87081db44939414fc4a48834f9e0da3e69f4c/src/flowlib.py#L240 @@ -154,48 +151,48 @@ def flow2Y(flow_data): def compute_color(u, v): - """ + """ compute optical flow color map :param u: horizontal optical flow :param v: vertical optical flow :return: """ - height, width = u.shape - img = np.zeros((height, width, 3)) + height, width = u.shape + img = np.zeros((height, width, 3)) - NAN_idx = np.isnan(u) | np.isnan(v) - u[NAN_idx] = v[NAN_idx] = 0 + NAN_idx = np.isnan(u) | np.isnan(v) + u[NAN_idx] = v[NAN_idx] = 0 - colorwheel = make_color_wheel() - ncols = np.size(colorwheel, 0) + colorwheel = make_color_wheel() + ncols = np.size(colorwheel, 0) - rad = np.sqrt(u ** 2 + v ** 2) + rad = np.sqrt(u ** 2 + v ** 2) - a = np.arctan2(-v, -u) / np.pi + a = np.arctan2(-v, -u) / np.pi - fk = (a + 1) / 2 * (ncols - 1) + 1 + fk = (a + 1) / 2 * (ncols - 1) + 1 - k0 = np.floor(fk).astype(int) + k0 = np.floor(fk).astype(int) - k1 = k0 + 1 - k1[k1 == ncols + 1] = 1 - f = fk - k0 + k1 = k0 + 1 + k1[k1 == ncols + 1] = 1 + f = fk - k0 - for i in range(0, np.size(colorwheel, 1)): - tmp = colorwheel[:, i] - col0 = tmp[k0 - 1] / 255 - col1 = tmp[k1 - 1] / 255 - col = (1 - f) * col0 + f * col1 + for i in range(np.size(colorwheel, 1)): + tmp = colorwheel[:, i] + col0 = tmp[k0 - 1] / 255 + col1 = tmp[k1 - 1] / 255 + col = (1 - f) * col0 + f * col1 - idx = rad <= 1 - col[idx] = 1 - rad[idx] * (1 - col[idx]) - notidx = np.logical_not(idx) + idx = rad <= 1 + col[idx] = 1 - rad[idx] * (1 - col[idx]) + notidx = np.logical_not(idx) - col[notidx] *= 0.75 - img[:, :, i] = np.uint8(np.floor(255 * col * (1 - NAN_idx))) + col[notidx] *= 0.75 + img[:, :, i] 
= np.uint8(np.floor(255 * col * (1 - NAN_idx))) - return img + return img def make_color_wheel():
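
Post-diff note (commentary placed after the diff body, so it is not consumed by `git am`): the largest hunk, ClusterHook.after_step() in pyanomaly/core/hook/functions/ocae_hooks.py, fits k-means centers on the pooled object features and then reuses those centers to assign pseudo-labels. A minimal sketch of that round-trip, using only the kmeans()/kmeans_predict() signatures from pyanomaly/core/other/kmeans.py visible in this patch; the random feature tensor, the cluster count of 10, and the CPU fallback are illustrative stand-ins, not values from the config:

    import torch
    from pyanomaly.core.other.kmeans import kmeans, kmeans_predict

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    features = torch.randn(1000, 3072)  # stand-in for the concatenated A/B/C features
    # fit: returns per-sample cluster ids and the fitted centers
    cluster_ids, centers = kmeans(X=features, num_clusters=10,
                                  distance='euclidean', device=device)
    # predict: assign pseudo-labels from the fitted centers, as the hook does
    pseudo_labels = kmeans_predict(features, centers, 'euclidean',
                                   device=device).detach().cpu().numpy()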