import os
import json
import pdb
from tqdm import tqdm
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
from util import *
from model import GPT
from arg_parser import parse_args
from summarizer import summarize_one_video, \
qa_one_video_by_summary, postprocess_response_dict
from video_seg import VideoSeg, extract_videoseg_from_descriptions, split_and_reconnect_segments
set_random_seed(42)
global_args = parse_args()
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
logger = set_logger(timestamp, global_args.logger_base_path)
api_key = os.getenv("OPENAI_API_KEY")
base_url = os.getenv("OPENAI_BASE_URL")
with open(global_args.example_summary_path,'r') as ex:
example_summary = ex.read()
with open(global_args.example_qa_by_summary_path,'r') as ex:
example_qa_by_summary = ex.read()
summarizer = GPT(api_key=api_key, model_name=global_args.model_name, temperature=global_args.temperature, base_url=base_url)
qa_model = GPT(api_key=api_key, model_name=global_args.model_name, temperature=global_args.temperature, base_url=base_url)
planner = GPT(api_key=api_key, model_name=global_args.model_name, temperature=global_args.temperature, base_url=base_url)
self_evaluator = GPT(api_key=api_key, model_name=global_args.model_name, temperature=global_args.temperature, base_url=base_url)
def bfs_select_segments(question, caption, num_frames, segment_des, use_cache=True):
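    """BFS-style expansion: ask the planner, in a single call, to pick all candidate
    segments whose frames look most relevant to the question, and to describe the
    frames to look for; returns the planner's JSON response as a string."""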
formatted_description = {
"frame_descriptions": [
{"segment_id": "1", "duration": "xxx - xxx", "description": "frame of xxx"},
{"segment_id": "2", "duration": "xxx - xxx", "description": "frame of xxx"},
{"segment_id": "3", "duration": "xxx - xxx", "description": "frame of xxx"},
]
}
prompt = f"""
Given a video that has {num_frames} frames, the frames are decoded at 1 fps. Given the following descriptions of sampled frames in the video:
{caption}
#C denotes that the sentence describes an action done by the camera wearer (the person who recorded the video while wearing a camera on their head).
#O denotes that the sentence describes an action done by someone other than the camera wearer.
To answer the following question:
```
{question}
```
However, the information in the initial frames is not sufficient.
Objective:
Our goal is to identify additional frames that contain crucial information necessary for answering the question. These frames should not only address the query directly but should also complement the insights gleaned from the descriptions of the initial frames.
To achieve this, we will:
1. Divide the video into segments based on the intervals between the initial frames, as candidate segments: {segment_des}
2. Determine which segments are likely to contain frames that are most relevant to the question. These frames should capture key visual elements, such as objects, humans, interactions, actions, and scenes, that are supportive to answer the question.
For each frame identified as potentially relevant, provide a concise description focusing on essential visual elements. Use a single sentence per frame. If the specifics of a segment's visual content are uncertain based on the current information, use placeholders for specific actions or objects, but ensure the description still conveys the segment's relevance to the query.
Select multiple frames from one segment if necessary to gather comprehensive insights.
Return the descriptions and the segment id in JSON format, note "segment_id" must be smaller than {len(segment_des) + 1}, "duration" should be the same as the candidate segments:
```
{formatted_description}
```
"""
system_prompt = "You are a helpful assistant designed to output JSON."
response, _ = planner.forward(head=system_prompt, prompt=prompt, logger=logger, use_cache=use_cache, use_json_format=True)
return response
def gbfs_select_one_segment(question, caption, num_frames, segment_des, use_cache=True):
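    """Greedy best-first expansion: ask the planner for the single candidate segment
    most likely to contain frames relevant to the question; returns the planner's
    JSON response as a string."""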
formatted_description = {
"frame_descriptions": [
{"segment_id": "1", "duration": "xxx - xxx", "description": "frame of xxx"}
]
}
prompt = f"""
Given a video that has {num_frames} frames, the frames are decoded at 1 fps. Given the following descriptions of sampled frames in the video:
{caption}
#C denotes that the sentence describes an action done by the camera wearer (the person who recorded the video while wearing a camera on their head).
#O denotes that the sentence describes an action done by someone other than the camera wearer.
To answer the following question:
```
{question}
```
However, the information in the initial frames is not sufficient.
Objective:
Our goal is to identify, step by step, additional frames that contain crucial information necessary for answering the question. These frames should not only address the query directly but should also complement the insights gleaned from the descriptions of the initial frames.
To achieve this, we will:
1. Consider the video segments based on the intervals between the initial frames, as candidate segments: {segment_des}
2. Determine which single segment is most likely to contain frames that are most relevant to the question. These frames should capture key visual elements, such as objects, humans, interactions, actions, and scenes, that are supportive to answer the question.
For the segment identified as potentially relevant, provide a concise description focusing on essential visual elements. Use a single sentence per frame. If the specifics of the segment's visual content are uncertain based on the current information, use placeholders for specific actions or objects, but ensure the description still conveys the segment's relevance to the query.
Return the description and the segment id in JSON format, note "segment_id" must be smaller than {len(segment_des) + 1}, "duration" should be the same as the candidate segments:
```
{formatted_description}
```
"""
system_prompt = "You are a helpful assistant designed to output JSON."
response, _ = planner.forward(head=system_prompt, prompt=prompt, logger=logger, use_cache=use_cache, use_json_format=True)
return response
def dijkstra_select_one_segment(question, caption, num_frames, segment_des, use_cache=True):
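    """Dijkstra-style expansion: ask the planner for the single candidate segment with
    the richest visual content and most dramatic scene changes, i.e. the one most worth
    splitting further (the question itself is not used here); returns the planner's
    JSON response as a string."""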
formatted_description = {
"frame_descriptions": [
{"segment_id": "1", "duration": "xxx - xxx", "description": "frame of xxx"}
]
}
prompt = f"""
Given a video that has {num_frames} frames, the frames are decoded at 1 fps. Given the following descriptions of sampled frames in the video:
{caption}
#C denotes that the sentence describes an action done by the camera wearer (the person who recorded the video while wearing a camera on their head).
#O denotes that the sentence describes an action done by someone other than the camera wearer.
Based on the intervals between the sampled frames, we have candidate video segments: {segment_des}
Please identify which candidate video segment contains the richest visual elements and the most dramatic scene changes, making it most suitable for splitting into smaller video segments. For example, if the characters and scenes have changed between the two sampled frames, then this video segment is suitable for splitting into smaller and atomic video segments.
For the segment identified as most suitable for splitting into smaller video segments, provide a concise description focusing on the segment's rich visual elements or scene changes. If the specifics of the segment's visual content are uncertain based on the current information, use placeholders for specific actions or objects.
Return the description and the segment id in JSON format, note "segment_id" must be smaller than {len(segment_des) + 1}, "duration" should be the same as the candidate segments:
```
{formatted_description}
```
"""
system_prompt = "You are a helpful assistant designed to output JSON."
response, _ = planner.forward(head=system_prompt, prompt=prompt, logger=logger, use_cache=use_cache, use_json_format=True)
return response
def a_star_select_one_segment(question, caption, num_frames, segment_des, use_cache=True):
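    """A*-style expansion: ask the planner for the single candidate segment that both
    is relevant to the question and is visually rich enough to be worth splitting;
    returns the planner's JSON response as a string."""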
formatted_description = {
"frame_descriptions": [
{"segment_id": "1", "duration": "xxx - xxx", "description": "frame of xxx"}
]
}
prompt = f"""
Given a video that has {num_frames} frames, the frames are decoded at 1 fps. Given the following descriptions of sampled frames in the video:
{caption}
#C denotes that the sentence describes an action done by the camera wearer (the person who recorded the video while wearing a camera on their head).
#O denotes that the sentence describes an action done by someone other than the camera wearer.
To answer the following question:
```
{question}
```
However, the information in the initial frames is not sufficient.
Objective:
In order to obtain more information about the video and ultimately answer the question, we need to identify, step by step, a video segment between the initial frames that meets the following two conditions:
1. contains crucial information necessary for answering the question. This video segment should not only address the query directly but should also complement the insights gleaned from the descriptions of the initial frames. This segment should capture key visual elements, such as objects, humans, interactions, actions, and scenes, that are supportive to answer the question.
2. contains rich visual elements and dramatic scene changes, making it suitable for splitting into smaller video segments. For example, if the characters and scenes have changed between the two sampled frames, then this video segment is suitable for splitting into smaller and atomic video segments.
To achieve this, we will:
1. Consider the video segments based on the intervals between the initial frames, as candidate segments: {segment_des}
2. Determine which single candidate segment is most likely to meet the above two conditions.
For the segment identified as most suitable, provide a concise description focusing on essential visual elements in the segment. Use a single sentence per frame. If the specifics of the segment's visual content are uncertain based on the current information, use placeholders for specific actions or objects.
Return the description and the segment id in JSON format, note "segment_id" must be smaller than {len(segment_des) + 1}, "duration" should be the same as the candidate segments:
```
{formatted_description}
```
"""
system_prompt = "You are a helpful assistant designed to output JSON."
response, _ = planner.forward(head=system_prompt, prompt=prompt, logger=logger, use_cache=use_cache, use_json_format=True)
return response
def self_eval(previous_prompt, answer, use_cache=True):
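    """Ask the self-evaluator to rate, on a 1-3 scale, how well the provided prompt
    supports the generated answer; returns a JSON string of the form
    {"confidence": ...}."""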
confidence_format = {"confidence": "xxx"}
prompt = f"""Please assess the confidence level in the decision-making process.
The provided information is as follows,
{previous_prompt}
The decision-making process is as follows,
{answer}
Criteria for Evaluation:
Insufficient Information (Confidence Level: 1): If information is too lacking for a reasonable conclusion.
Partial Information (Confidence Level: 2): If information partially supports an informed guess.
Sufficient Information (Confidence Level: 3): If information fully supports a well-informed decision.
Assessment Focus:
Evaluate based on the relevance, completeness, and clarity of the provided information in relation to the decision-making context.
Please generate the confidence with JSON format {confidence_format}
"""
system_prompt = "You are a helpful assistant designed to output JSON."
response, _ = self_evaluator.forward(head=system_prompt, prompt=prompt, logger=logger, use_cache=use_cache, use_json_format=True)
return response
def generate_answer_cot(question, caption, num_frames, use_cache=True):
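    """Chain-of-thought QA over the sampled captions; returns both the prompt (so it
    can later be passed to self_eval) and the raw model response."""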
answer_format = {"final_answer": "xxx"}
prompt = f"""
Given a video that has {num_frames} frames, the frames are decoded at 1 fps. Given the following descriptions of the sampled frames in the video:
{caption}
#C denotes that the sentence describes an action done by the camera wearer (the person who recorded the video while wearing a camera on their head).
#O denotes that the sentence describes an action done by someone other than the camera wearer.
Please answer the following question:
```
{question}
```
Please think step-by-step and write the best answer index in JSON format {answer_format}. Note that only one answer is returned for the question.
"""
system_prompt = "You are a helpful assistant."
response, _ = qa_model.forward(head=system_prompt, prompt=prompt, logger=logger, use_cache=use_cache, use_json_format=False)
return prompt, response
def generate_answer_direct(question, caption, num_frames, use_cache=True):
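    """Direct QA over the sampled captions, forcing a JSON answer index; used as the
    final fallback when the earlier steps produced no confident answer."""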
answer_format = {"final_answer": "xxx"}
prompt = f"""
Given a video that has {num_frames} frames, the frames are decoded at 1 fps. Given the following descriptions of the sampled frames in the video:
{caption}
#C denotes that the sentence describes an action done by the camera wearer (the person who recorded the video while wearing a camera on their head).
#O denotes that the sentence describes an action done by someone other than the camera wearer.
Please answer the following question:
```
{question}
```
Please think carefully and write the best answer index in JSON format {answer_format}. Note that only one answer is returned for the question, and you must select one answer index from the candidates.
"""
system_prompt = "You are a helpful assistant designed to output JSON."
response, _ = qa_model.forward(head=system_prompt, prompt=prompt, logger=logger, use_cache=use_cache, use_json_format=True)
return response
def summarize_and_qa(video_id, sampled_caps, ann, args):
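    """Summary-based QA: summarize the sampled captions of the video, answer the
    question from that summary, and return (answer, confidence)."""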
summary = summarize_one_video(summarizer, video_id, sampled_caps, \
example_summary, use_cache=args.use_cache, logger=logger)
response_dict = qa_one_video_by_summary(qa_model, ann, summary, video_id, sampled_caps, \
example_qa_by_summary, use_cache=args.use_cache, logger=logger)
    answer, confidence = postprocess_response_dict(response_dict)
    return answer, confidence
def qa_and_reflect(formatted_question, sampled_caps, num_frames, args):
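    """Reflection-based QA: answer with chain-of-thought, then self-evaluate the answer
    against the prompt that produced it; returns (answer, confidence)."""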
previous_prompt, answer_str = generate_answer_cot(
formatted_question, sampled_caps, num_frames, args.use_cache
)
answer = parse_text_find_number(answer_str, logger)
confidence_str = self_eval(previous_prompt, answer_str, args.use_cache)
confidence = parse_text_find_confidence(confidence_str, logger)
return answer, confidence
def choose_ans(s_qa_ans, s_qa_conf, s_conf_lower, \
r_qa_ans, r_qa_conf, r_conf_lower, \
ans_mode, step):
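    """Combine the summary-based (s) and reflection-based (r) answers according to
    `ans_mode` ("s", "r", "sr", "rs", "vote", "vote_conf_and", "vote_conf_or"),
    accepting an answer only if its confidence reaches the corresponding lower bound.
    Returns (answer, get_ans_step); answer is -1 and get_ans_step is None when no
    answer is accepted."""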
answer = -1
get_ans_step = None
if ans_mode == "s":
if s_qa_ans != -1 and s_qa_conf >= s_conf_lower:
answer = s_qa_ans
get_ans_step = f"{step}_s_qa"
elif ans_mode == "r":
if r_qa_ans != -1 and r_qa_conf >= r_conf_lower:
answer = r_qa_ans
get_ans_step = f"{step}_r_qa"
elif ans_mode == "sr":
if s_qa_ans != -1 and s_qa_conf >= s_conf_lower:
answer = s_qa_ans
get_ans_step = f"{step}_s_qa"
elif r_qa_ans != -1 and r_qa_conf >= r_conf_lower:
answer = r_qa_ans
get_ans_step = f"{step}_r_qa"
elif ans_mode == "rs":
if r_qa_ans != -1 and r_qa_conf >= r_conf_lower:
answer = r_qa_ans
get_ans_step = f"{step}_r_qa"
elif s_qa_ans != -1 and s_qa_conf >= s_conf_lower:
answer = s_qa_ans
get_ans_step = f"{step}_s_qa"
elif ans_mode == "vote":
if s_qa_ans == r_qa_ans:
answer = s_qa_ans
get_ans_step = f"{step}_s_r"
elif (ans_mode == "vote_conf_and"):
if (s_qa_ans == r_qa_ans) and \
((s_qa_conf >= s_conf_lower) and (r_qa_conf >= r_conf_lower)):
answer = s_qa_ans
get_ans_step = f"{step}_s_r"
elif (ans_mode == "vote_conf_or"):
if (s_qa_ans == r_qa_ans) and \
((s_qa_conf >= s_conf_lower) or (r_qa_conf >= r_conf_lower)):
answer = s_qa_ans
get_ans_step = f"{step}_s_r"
else:
raise KeyError
return answer, get_ans_step
def select_process(formatted_question, sample_idx, sampled_caps, num_frames, step,
args, all_sample_idx, caps, video_segments, select_fn):
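    """One node-expansion step: describe the current candidate segments to the planner
    via `select_fn`, parse the selected segments (retrying without the cache on parse
    failure), split and reconnect the segment list, and return the updated segments
    together with the new sampled frame indices taken from the segment boundaries."""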
segment_des = {
i + 1: f"{video_seg.start}-{video_seg.end}"
for i, video_seg in enumerate(video_segments)
}
# segment_des: {1: '1-12', 2: '12-23', 3: '23-34', 4: '34-45', ...
# LLM decides which segment from `segment_des` to use
candidate_descriptions = select_fn(formatted_question, sampled_caps, num_frames, segment_des, args.use_cache)
    selected_descriptions = None
    if candidate_descriptions is not None:
        parsed_candidate_descriptions = parse_json(candidate_descriptions)
        selected_descriptions = get_frames_descriptions(parsed_candidate_descriptions)
    # re-generate (without the cache) if the response could not be parsed, up to `max_generate` attempts
    max_generate = 5
    generate_count = 0
    while candidate_descriptions is None or selected_descriptions is None:
        generate_count += 1
        if generate_count > max_generate:
            break
        candidate_descriptions = select_fn(formatted_question, sampled_caps, num_frames, segment_des, False)
        parsed_candidate_descriptions = parse_json(candidate_descriptions)
        selected_descriptions = get_frames_descriptions(parsed_candidate_descriptions)
    # extract `VideoSeg` instances based on selected_descriptions
selected_video_segments = extract_videoseg_from_descriptions(selected_descriptions)
video_segments = split_and_reconnect_segments(selected_video_segments, video_segments, args.for_seg_not_interested, num_frames)
    # collect the sampled frame indices from the boundaries of the updated `video_segments`
sample_idx_set = set()
for segment in video_segments:
sample_idx_set.add(segment.start)
sample_idx_set.add(segment.end)
sample_idx = sorted(list(sample_idx_set))
return video_segments, sample_idx
def run_one_question(video_id, ann, caps, logs, args):
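    """Process one video question: iteratively QA on the currently sampled captions
    (summary-based and/or reflection-based, per `args.ans_mode`) and, when no
    confident answer is accepted, expand the frame samples with the chosen search
    strategy; falls back to post-processing QA and then direct QA, and records the
    result in `logs`."""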
logger.info(f"Start to process {video_id}")
print(f"\nStart video: {video_id}")
    get_ans_step = None  # which step produced the answer
    sample_idx_change_list = []  # history of `sample_idx` over the search
question = ann["question"]
answers = [ann[f"option {i}"] for i in range(5)]
formatted_question = (
f"Here is the question: {question}\n"
+ "Here are the choices: "
+ " ".join([f"{i}. {ans}" for i, ans in enumerate(answers)])
)
num_frames = len(caps)
# root node initialization
sample_idx = list(range(1, num_frames + 1, args.init_interval))
sampled_caps = read_caption(caps, sample_idx) # e.g. {'frame 1': '#C C pours the water from the bowl', 'frame 45': '#C C puts the sponge in the sink', 'frame 90': '#C C scrubs the plate with the sponge', 'frame 135': '#C C puts the soap bottle on the sink', 'frame 180': '#C C opens the soap bottle'}
all_sample_idx = sample_idx
# video_segments initialization
video_segments = []
for segment_id in range(1, len(sample_idx)):
video_seg = VideoSeg(sample_idx[segment_id - 1], sample_idx[segment_id], segment_id, None)
video_segments.append(video_seg)
sample_idx_change_list.append(sample_idx)
# main loop for tree search
# 1. LLM QA
# 2. node expansion
for step in range(1, args.final_step + 1):
# print(f"{video_id}: step {step}, sample_idx {sample_idx}")
print_segment_list(video_segments)
# 1. LLM QA
if args.ans_mode == "s":
s_qa_ans, s_qa_conf = summarize_and_qa(video_id, sampled_caps, ann, args)
r_qa_ans, r_qa_conf = None, None
elif args.ans_mode == "r":
r_qa_ans, r_qa_conf = qa_and_reflect(formatted_question, sampled_caps, num_frames, args)
s_qa_ans, s_qa_conf = None, None
else:
s_qa_ans, s_qa_conf = summarize_and_qa(video_id, sampled_caps, ann, args)
r_qa_ans, r_qa_conf = qa_and_reflect(formatted_question, sampled_caps, num_frames, args)
answer, get_ans_step = choose_ans(s_qa_ans, s_qa_conf, args.s_conf_lower, \
r_qa_ans, r_qa_conf, args.r_conf_lower, \
args.ans_mode, step)
if answer != -1:
break
# 2. node expansion
if args.search_strategy == "bfs":
select_fn = bfs_select_segments
video_segments, sample_idx = \
select_process(formatted_question, sample_idx, sampled_caps, num_frames,
step, args, all_sample_idx, caps, video_segments, select_fn)
elif args.search_strategy == "gbfs":
select_fn = gbfs_select_one_segment
for select_iter in range(args.beam_size):
video_segments, sample_idx = \
select_process(formatted_question, sample_idx, sampled_caps, num_frames,
step, args, all_sample_idx, caps, video_segments, select_fn)
elif args.search_strategy == "dijkstra":
select_fn = dijkstra_select_one_segment
for select_iter in range(args.beam_size):
video_segments, sample_idx = \
select_process(formatted_question, sample_idx, sampled_caps, num_frames,
step, args, all_sample_idx, caps, video_segments, select_fn)
elif args.search_strategy == "a_star":
select_fn = a_star_select_one_segment
for select_iter in range(args.beam_size):
video_segments, sample_idx = \
select_process(formatted_question, sample_idx, sampled_caps, num_frames,
step, args, all_sample_idx, caps, video_segments, select_fn)
else:
raise KeyError
# aggregate all `sample_idx` to `all_sample_idx`
all_sample_idx = sorted(list(set(all_sample_idx + sample_idx)))
sample_idx_change_list.append(sample_idx)
sampled_caps = read_caption(caps, sample_idx)
# print(video_id, "final sample num:", len(sample_idx))
# Post Process
if answer == -1:
if args.post_resume_samples:
sample_idx = list(range(1, num_frames + 1, args.init_interval))
else:
sample_idx = all_sample_idx
sample_idx_change_list.append(sample_idx)
sampled_caps = read_caption(caps, sample_idx)
if args.post_ans_mode == "s":
s_qa_ans, s_qa_conf = summarize_and_qa(video_id, sampled_caps, ann, args)
r_qa_ans, r_qa_conf = None, None
elif args.post_ans_mode == "r":
r_qa_ans, r_qa_conf = qa_and_reflect(formatted_question, sampled_caps, num_frames, args)
s_qa_ans, s_qa_conf = None, None
else:
s_qa_ans, s_qa_conf = summarize_and_qa(video_id, sampled_caps, ann, args)
r_qa_ans, r_qa_conf = qa_and_reflect(formatted_question, sampled_caps, num_frames, args)
step = "post"
answer, get_ans_step = choose_ans(s_qa_ans, s_qa_conf, args.post_s_conf_lower, \
r_qa_ans, r_qa_conf, args.post_r_conf_lower, \
args.post_ans_mode, step)
# final direct QA
if answer == -1:
answer_str = generate_answer_direct(
formatted_question, sampled_caps, num_frames, args.use_cache
)
answer = parse_text_find_number(answer_str, logger)
        get_ans_step = "final_direct_qa"
# no_ans
if answer == -1:
logger.info(f"No answer video id: {video_id}")
print(f"\nNo answer video id: {video_id}\n")
# print_nested_list(sample_idx_change_list)
logger.info(f"Finished video: {video_id}/{answer}/{ann['truth']}")
print(f"Finished video: {video_id}/{answer}/{ann['truth']}\n")
label = int(ann["truth"])
corr = int(label == answer)
count_frame = len(all_sample_idx)
logs[video_id] = {
"answer": answer,
"label": label,
"corr": corr,
"count_frame": count_frame,
"get_ans_step": get_ans_step,
}
def main(args):
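    """Load annotations and captions, select the video ids to process (process_num,
    specific_id, reprocess_log, avoid_id, specific_id_path options), run each
    question sequentially (a threaded version is kept commented out), and dump the
    logs to a timestamped JSON result file."""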
output_result_file = os.path.join(args.output_base_path, f"{timestamp}.json")
anns = json.load(open(args.anno_path, "r"))
all_caps = json.load(open(args.cap_path, "r"))
logs = {}
process_video_ids = list(anns.keys())[:args.process_num]
    if args.specific_id is not None:
specific_video_ids = [args.specific_id]
process_video_ids = get_intersection(specific_video_ids, list(anns.keys()))
    if args.reprocess_log is not None:
        # Load a previous log file and re-process the videos whose answer did not match the label
        reprocess_log = json.load(open(args.reprocess_log, "r"))
        process_video_ids = [
            video_id for video_id, log in reprocess_log.items()
            # if log["answer"] == -1
            if log["answer"] != log["label"]
        ]
    if args.avoid_id is not None:
process_video_ids.remove(args.avoid_id)
    if args.specific_id_path is not None:
process_video_ids = json.load(open(args.specific_id_path, "r"))
logger.info(f"{len(process_video_ids)} videos to process")
tasks = [
(video_id, anns[video_id], all_caps[video_id], logs, args)
for video_id in process_video_ids
]
# parallel processing
# with ThreadPoolExecutor(max_workers=args.max_workers) as executor:
# for _ in tqdm(executor.map(lambda p: run_one_question(*p), tasks), total=len(tasks), desc="Processing"):
# pass
for task in tqdm(tasks):
try:
run_one_question(*task)
except Exception as e:
print(f"\nError -- main -- {e}\n")
json.dump(logs, open(output_result_file, "w"))
if __name__ == "__main__":
args = parse_args()
logger.info(args)
main(args)
print(args)
print(f"\ntimestamp: {timestamp}\n")
# eval
os.system(f"python3 eval.py results/{args.dataset}/{timestamp}.json")