diff --git a/evaluate.sh b/evaluate.sh index 2713aff..28df779 100644 --- a/evaluate.sh +++ b/evaluate.sh @@ -4,9 +4,9 @@ # pose_number is how many number of keypoints we want to see in a person # pose_threshold is the score we set to filter the keypoints whose score is small # ground_truth='/export/home/cyh/mygithub/PoseGCN/data/annotations/val_2017/' -ground_truth='${PGPT_ROOT}/data/demodataset/annotations/' -predictions='${PGPT_ROOT}/results/demo' -results='test' +ground_truth='/PGPT/data/demodataset/annotations' +predictions='/PGPT/results/demo' +results='/PGPT/results/evaluate' pose_number=0 pose_threshold=0.5 diff --git a/inference/config.py b/inference/config_old.py similarity index 79% rename from inference/config.py rename to inference/config_old.py index 7fd8762..21b27d7 100644 --- a/inference/config.py +++ b/inference/config_old.py @@ -1,31 +1,33 @@ class Config(): - root = '${PGPT_ROOT}' + root = '/PGPT' + # save_dir is the location where we store the results save_dir = root + '/results/demo' + # json_path_detection is the location where we store the detection results json_path_detection = root + '/results/demo_detection.json' - - # gt_json_path is the gound truth of the validiation, all the ground_truth are in one file + + # gt_json_path is the ground truth of the validation, all the ground_truth are in one file gt_json_path = root + '/data/demo_val.json' - + # the data folder of the PoseTrack dataset data_folder = root + '/data/demodataset' - + # the path of the location where we store the video - video_path = root - + video_path = save_dir + # the path of the track model track_model = root + '/models/tracker.pth' - + # the path of the pose estimation model pose_model = root + '/models/pose_gcn.pth.tar' - + # pose config file location pose_cfg = root + '/cfgs/pose_res152.yaml' - + # the path of the embedding model embedding_model = root + '/models/embedding_model.pth' - + def __init__(self): print('Using the config class at', __file__) diff --git
a/inference/inference.py b/inference/inference.py index 65302ee..df384b7 100644 --- a/inference/inference.py +++ b/inference/inference.py @@ -9,7 +9,13 @@ from tqdm import tqdm import random -from config import Config + +#from config import Config +import sys +sys.path.append('../lib') +from config import cfg +from config import update_config + from track_and_detect_new import Track_And_Detect ''' @@ -51,13 +57,21 @@ } ''' match_list=[13,12,14,9,8,10,7,11,6,3,2,4,1,5,0] -config = Config() +#config = Config() def parseArgs(): parser = argparse.ArgumentParser(description="Evaluation of Pose Estimation and Tracking (PoseTrack)") + parser.add_argument('--cfg', type=str, required=True) #added by alnguyen parser.add_argument("-t", "--detection_thresh",dest = 'det_thresh',required=False, default=0.4, type= float) parser.add_argument("-p", "--pos_thresh",dest = 'pose_thresh',required=False, default=0, type= float) parser.add_argument("-v", "--vis_flag",dest = 'vis_flag',required=False, default=False, type= bool) - return parser.parse_args() + parser.add_argument('opts', + help='Modify config options using the command-line', + default=None, + nargs=argparse.REMAINDER) #added by alnguyen + + args = parser.parse_args() + + return args class DateEncoder(json.JSONEncoder): def default(self, obj): @@ -67,18 +81,22 @@ def default(self, obj): return obj.tolist() return json.JSONEncoder.default(self, obj) -def track_test(args, gpu_id=0): +def track_test(): + args = parseArgs() pose_vis_thresh = args.pose_thresh detection_score_thresh = args.det_thresh vis_flag = args.vis_flag - json_path = config.json_path_detection + + update_config(cfg, args) + gpu_id = cfg.GPU_ID + json_path = cfg.INPUT.JSON_DETECTION_PATH # Change temporially - save_dir = config.save_dir + save_dir = cfg.OUTPUT.SAVE_DIR - gt_json_path = config.gt_json_path - data_folder = config.data_folder - video_path = config.video_path + gt_json_path = cfg.INPUT.GT_JSON_PATH + data_folder = cfg.INPUT.DATA_FOLDER + 
video_path = cfg.OUTPUT.VIDEO_PATH print('----------------------------------------------------------------------------------') print('Detection_score_thresh: {} Vis_flag: {}'.format(detection_score_thresh, vis_flag)) @@ -87,24 +105,27 @@ def track_test(args, gpu_id=0): if not os.path.exists(save_dir): os.makedirs(save_dir) - # Load the Detection Results + # Load the Detection Results (demo_detection.json) with open(json_path,'r') as f: bbox_dict = json.load(f) # Create the Tracker - tracker = Track_And_Detect(gpu_id=gpu_id, track_model=config.track_model, pose_model=config.pose_model, embedding_model=config.embedding_model) + track_model=cfg.INPUT.TRACK_MODEL + pose_model=cfg.INPUT.POSE_MODEL + embedding_model=cfg.INPUT.EMBEDDING_MODEL + tracker = Track_And_Detect(gpu_id=gpu_id, track_model=track_model, pose_model=pose_model, embedding_model=embedding_model) - # Load the Ground Truth to get the right video keys + # Load the Ground Truth to get the right video keys (demo_val.json) with open(gt_json_path,'r') as f: gt_dict = json.load(f) video_keys = gt_dict.keys() pbar = tqdm(range(len(video_keys))) - for video_name in video_keys: + for video_name in video_keys: #in demo_val.json pbar.update(1) frame_dict = bbox_dict[video_name] - video_name = video_name.replace('.json','') + #video_name = video_name.replace('.json','') video_json = {'annolist':[]} save_path = os.path.join(save_dir, video_name+'.json') idx =0 @@ -124,7 +145,7 @@ def track_test(args, gpu_id=0): if not os.path.exists(video_path): os.makedirs(video_path) video_store_name = video_path + '/{}.mp4' - videoWriter = cv2.VideoWriter(video_store_name.format(video_name),fourcc,10,(im_W,im_H)) + videoWriter = cv2.VideoWriter(video_store_name.format(video_name+'-pgpt'),fourcc,10,(im_W,im_H)) final_list = tracker.init_tracker(frame,det_list) else: track_list = tracker.multi_track(frame) @@ -154,5 +175,4 @@ def track_test(args, gpu_id=0): if __name__ == "__main__": - args = parseArgs() - track_test(args=args) + 
track_test() diff --git a/inference/pose_estimation_graph.py b/inference/pose_estimation_graph.py index ad1cf5c..82bacd9 100755 --- a/inference/pose_estimation_graph.py +++ b/inference/pose_estimation_graph.py @@ -56,7 +56,7 @@ ''' class PoseNet(object): def __init__(self, gpu_id=0, model_path=None): - self.cfg_file='${PGPT_ROOT}/cfgs/pose_res152.yaml' + self.cfg_file='/PGPT/cfgs/pose_res152.yaml' self.flag = 0 update_config(self.cfg_file) diff --git a/inference/skeleton_visulize.py b/inference/skeleton_visulize.py index 5ee9bc0..c700651 100644 --- a/inference/skeleton_visulize.py +++ b/inference/skeleton_visulize.py @@ -5,12 +5,17 @@ import cv2 import numpy as np from cv2_color import Color +import glob +#from config import Config +import sys +sys.path.append('../lib') +from config import cfg +from config import update_config # The match list from the results to the test match_list=[13,12,14,9,8,10,7,11,6,3,2,4,1,5,0] color = Color(flag='bgr') - def draw_limb(image, kps, color): def draw_line(head, tail): if head == [] or tail == []: @@ -33,7 +38,17 @@ def draw_line(head, tail): for h, t in limbs: draw_line(kps[h], kps[t]) +def parseArgs(): + parser = argparse.ArgumentParser(description="Visualizing the results") + parser.add_argument('--cfg', type=str, required=True) #added by alnguyen + parser.add_argument('opts', + help='Modify config options using the command-line', + default=None, + nargs=argparse.REMAINDER) #added by alnguyen + + args = parser.parse_args() + return args def demo(image_dir, result_dir, save_dir): """ @@ -42,12 +57,14 @@ def demo(image_dir, result_dir, save_dir): save_dir: the loaction where we store the result videos """ - json_files = os.listdir(result_dir) + #json_files = os.listdir(result_dir) + json_files = glob.glob(result_dir + "/*.json") + json_files = [json_file.split('/')[-1] for json_file in json_files] pbar = tqdm(range(len(json_files))) for json_name in json_files: - video_name = json_name.replace('.json','_new') + video_name = 
json_name.replace('.json','-pgpt') video_folder = os.path.join(save_dir, video_name) @@ -58,11 +75,13 @@ def demo(image_dir, result_dir, save_dir): old_annolist = json.load(f)['annolist'] pbar.set_description('Visulizing video {}'.format(video_name)) color_list = color.get_random_color_list() + j = 0 for i,annotation in enumerate(old_annolist): color_flag = 0 frame_name = annotation['image'][0]['name'] - frame_store_path = video_folder + '/{}'.format(frame_name.split('/')[-1]) - frame_path = os.path.join(image_dir,frame_name) + #annotation['image_id'] #10010010103=frame_id + frame_store_path = os.path.join(video_folder, frame_name.split('/')[-1]) #file + frame_path = os.path.join(image_dir,frame_name) #file for read frame = cv2.imread(frame_path) im_H, im_W, im_C = frame.shape if i==0: @@ -73,30 +92,37 @@ def demo(image_dir, result_dir, save_dir): if len(anno['annopoints']) == 0: continue old_point_list = anno['annopoints'][0]['point'] - xmin, xmax, ymin, ymax, track_id = anno['x1'][0], anno['x2'][0], anno['y1'][0], anno['y2'][0], anno['track_id'][0] color_flag = int(track_id) % 16 - kps = [[] for _ in range(15)] for pose in old_point_list: - pose_id, pose_x, pose_y, = pose['id'][0], pose['x'][0], pose['y'][0] kps[pose_id] = (int(pose_x), int(pose_y)) - cv2.circle(frame,(int(pose_x),int(pose_y)), 3 ,color_list[color_flag], -1) + cv2.circle(frame,(int(pose_x),int(pose_y)), 3 ,color_list[color_flag], -1) draw_limb(frame, kps, color_list[color_flag]) cv2.rectangle(frame, (int(xmin),int(ymin)), (int(xmax),int(ymax)), color_list[color_flag], 3) cv2.putText(frame, 'id:' + str(track_id), (int(xmin),int(ymin)), cv2.FONT_HERSHEY_SIMPLEX, 1, color_list[color_flag], 2) videoWriter.write(frame) cv2.imwrite(frame_store_path, frame) + #videoWriter.release() pbar.update(1) pbar.close() if __name__ == '__main__': print('Visualizing the results') - image_dir = '${PGPT_ROOT}/data/demodataset/' - result_dir = '${PGPT_ROOT}/results/demo/' - save_dir = '${PGPT_ROOT}/results/render/' + 
#parser.add_argument("--image_dir", type=str, default=cfg.INPUT.DATA_FOLDER) # /PGPT/data/demodataset + #parser.add_argument("--result_dir", type=str, default=cfg.OUTPUT.SAVE_DIR) # /PGPT/results/demo + #parser.add_argument("--save_dir", type=str, default=cfg.OUTPUT.VIDEO_PATH) # /PGPT/results/demo + + + args = parseArgs() + update_config(cfg, args) + + image_dir = cfg.INPUT.DATA_FOLDER + result_dir = cfg.OUTPUT.SAVE_DIR + save_dir = cfg.OUTPUT.VIDEO_PATH + if not os.path.exists(save_dir): os.makedirs(save_dir) demo(image_dir, result_dir, save_dir) diff --git a/inference/track_and_detect_new.py b/inference/track_and_detect_new.py index f16f331..bbf4bc1 100644 --- a/inference/track_and_detect_new.py +++ b/inference/track_and_detect_new.py @@ -15,7 +15,7 @@ from tracker import SiamFCTracker from match import Matcher -from model.nms.nms_wrapper import nms +#from model.nms.nms_wrapper import nms class Track_And_Detect(object): effective_track_thresh = 0.5 diff --git a/lib/config/__init__.py b/lib/config/__init__.py new file mode 100644 index 0000000..8c70ee9 --- /dev/null +++ b/lib/config/__init__.py @@ -0,0 +1,9 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# The code is based on HigherHRNet-Human-Pose-Estimation. 
+# (https://github.com/HRNet/HigherHRNet-Human-Pose-Estimation) +# ------------------------------------------------------------------------------ + +from .default import _C as cfg +from .default import update_config diff --git a/lib/config/default.py b/lib/config/default.py new file mode 100644 index 0000000..ad4dc51 --- /dev/null +++ b/lib/config/default.py @@ -0,0 +1,53 @@ +import os +from yacs.config import CfgNode as CN + + +_C = CN() + +_C.GPU_ID = 0 # does not work for the other values (1,2,3) +_C.SYSTEM = CN() +# Number of GPUS to use in the experiment +_C.SYSTEM.NUM_GPUS = 4 +# Number of workers for doing things +_C.SYSTEM.NUM_WORKERS = 4 + +_C.SYSTEM.PGPT_ROOT = '/PGPT' +# pose config file location +_C.SYSTEM.POSE_CONFIG = '/PGPT/cfgs/pose_res152.yaml' + +_C.INPUT = CN() +# json from detection and pose, images directory +_C.INPUT.JSON_DETECTION_PATH = '/PGPT/results/demo_detection.json' +# gt_json_path is the ground truth of the validation, all the ground_truth are in one file +_C.INPUT.GT_JSON_PATH = '/PGPT/data/demo_val.json' +# the data folder of the PoseTrack dataset +_C.INPUT.DATA_FOLDER = '/PGPT/data/demodataset' +# the path of the track model +_C.INPUT.TRACK_MODEL = '/PGPT/models/tracker.pth' +# the path of the pose estimation model +_C.INPUT.POSE_MODEL = '/PGPT/models/pose_gcn.pth.tar' +_C.INPUT.EMBEDDING_MODEL = '/PGPT/models/embedding_model.pth' + +_C.OUTPUT = CN() +# where we store the results +_C.OUTPUT.SAVE_DIR = '/PGPT/results/demo' +# the path of the location where we store the video +_C.OUTPUT.VIDEO_PATH = '/PGPT/results/demo/demo-pgpt.mp4' + +def get_cfg_defaults(): + """Get a yacs CfgNode object with default values for my_project.""" + # Return a clone so that the defaults will not be altered + # This is for the "local variable" use pattern + return _C.clone() + +def update_config(cfg, args): + cfg.defrost() + cfg.merge_from_file(args.cfg) + cfg.merge_from_list(args.opts) + cfg.freeze() + + +if __name__ == '__main__': + import sys + 
with open(sys.argv[1], 'w') as f: + print(_C, file=f) diff --git a/lib/poseval/py/eval_helpers.py b/lib/poseval/py/eval_helpers.py index 5ec15df..2ba6917 100755 --- a/lib/poseval/py/eval_helpers.py +++ b/lib/poseval/py/eval_helpers.py @@ -227,8 +227,9 @@ def process_arguments(argv): elif len(argv)<3 or len(argv)>4: help() - gt_file = argv[1] - pred_file = argv[2] + gt_dir = argv[1] + pred_dir = argv[2] + return gt_dir, pred_dir, mode if not os.path.exists(gt_file): help('Given ground truth directory does not exist!\n') @@ -282,7 +283,6 @@ def load_data(argv): return gtFramesAll, prFramesAll - def cleanupData(gtFramesAll,prFramesAll): # remove all GT frames with empty annorects and remove corresponding entries from predictions @@ -377,6 +377,7 @@ def load_data_dir(argv): if not os.path.exists(pred_dir): help('Given prediction directory ' + pred_dir + ' does not exist!\n') filenames = glob.glob(gt_dir + "/*.json") + gtFramesAll = [] prFramesAll = [] diff --git a/requirement.txt b/requirement.txt deleted file mode 100644 index 5352db1..0000000 --- a/requirement.txt +++ /dev/null @@ -1,28 +0,0 @@ -certifi==2019.3.9 -cffi==1.12.3 -Click==7.0 -cycler==0.10.0 -easydict==1.9 -fire==0.1.3 -joblib==0.13.2 -kiwisolver==1.1.0 -matplotlib==3.1.0 -mkl-fft==1.0.12 -mkl-random==1.0.2 -numpy==1.16.4 -olefile==0.46 -opencv-python==4.1.0.25 -pandas==0.24.2 -Pillow==6.0.0 -pycparser==2.19 -pyparsing==2.4.0 -python-dateutil==2.8.0 -pytz==2019.1 -PyYAML==5.1.1 -scikit-learn==0.21.2 -scipy==1.2.1 -Shapely==1.6.4.post2 -six==1.12.0 -torch==0.4.0 -torchvision==0.2.1 -tqdm==4.32.1