Skip to content

Commit dbf9d69

Browse files
committed
after Sofya's review
1 parent 4f2bf4b commit dbf9d69

File tree

2 files changed

+11
-8
lines changed

2 files changed

+11
-8
lines changed

tools/llm_bench/llm_bench_utils/parse_json_data.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,8 @@ def parse_vlm_json_data(json_data_list):
2323
text_param_list = []
2424
for json_data in json_data_list:
2525
prompt_data = create_base_prompt(json_data)
26-
assert ("media" in json_data) ^ ("video" in json_data)
26+
if ("media" in json_data) and ("video" in json_data):
27+
raise ValueError("only one of the keys 'media' and 'video' is allowed")
2728
if "media" in json_data:
2829
prompt_data["media"] = json_data["media"]
2930
if "video" in json_data:

tools/llm_bench/task/visual_language_generation.py

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -334,8 +334,10 @@ def run_visual_language_generation_benchmark(
334334
log.info(f"Numbeams: {args['num_beams']}, benchmarking iter nums(exclude warm-up): {num_iters}, "
335335
f'prompt nums: {len(image_text_list)}, prompt idx: {prompt_idx_list}')
336336

337-
if use_genai: gen_fn = run_visual_language_generation_genai
338-
else: gen_fn = run_visual_language_generation_optimum
337+
if use_genai:
338+
gen_fn = run_visual_language_generation_genai
339+
else:
340+
gen_fn = run_visual_language_generation_optimum
339341

340342
proc_id = os.getpid()
341343
iter_timestamp = model_utils.init_timestamp(num_iters, image_text_list, prompt_idx_list)
@@ -350,8 +352,8 @@ def run_visual_language_generation_benchmark(
350352
input_text, num, model, processor, args, iter_data_list, md5_list,
351353
p_idx, bench_hook, model_precision, proc_id, mem_consumption, required_frames)
352354
iter_timestamp[num][p_idx]['end'] = datetime.datetime.now().isoformat()
353-
prefix = '[warm-up]' if num == 0 else '[{}]'.format(num)
354-
log.info(f"{prefix}[P{p_idx}] start: {iter_timestamp[num][p_idx]['start']}, end: {iter_timestamp[num][p_idx]['end']}")
355+
prefix = f"[warm-up][P{p_idx}]" if num == 0 else f"[{num}][P{p_idx}]"
356+
log.info(f"{prefix} start: {iter_timestamp[num][p_idx]['start']}, end: {iter_timestamp[num][p_idx]['end']}")
355357
else:
356358
for idx, input_text in enumerate(image_text_list):
357359
p_idx = prompt_idx_list[idx]
@@ -363,8 +365,8 @@ def run_visual_language_generation_benchmark(
363365
input_text, num, model, processor, args, iter_data_list, md5_list, prompt_idx_list[idx],
364366
bench_hook, model_precision, proc_id, mem_consumption, required_frames)
365367
iter_timestamp[num][p_idx]['end'] = datetime.datetime.now().isoformat()
366-
prefix = '[warm-up]' if num == 0 else '[{}]'.format(num)
367-
log.info(f"{prefix}[P{p_idx}] start: {iter_timestamp[num][p_idx]['start']}, end: {iter_timestamp[num][p_idx]['end']}")
368+
prefix = f"[warm-up][P{p_idx}]" if num == 0 else f"[{num}][P{p_idx}]"
369+
log.info(f"{prefix} start: {iter_timestamp[num][p_idx]['start']}, end: {iter_timestamp[num][p_idx]['end']}")
368370

369371
metrics_print.print_average(iter_data_list, prompt_idx_list, args['batch_size'], True)
370372
return iter_data_list, pretrain_time, iter_timestamp
@@ -378,7 +380,7 @@ def get_image_text_prompt(args):
378380
if len(vlm_param_list) > 0:
379381
for vlm_file in vlm_param_list:
380382
if args['prompt_file'] is not None and len(args['prompt_file']) > 0 and 'media' in vlm_file:
381-
if 'video' in vlm_file: log.warning('media and video cannot be specified in a single prompt file')
383+
if 'video' in vlm_file: raise ValueError('media and video cannot be specified in a single prompt file')
382384
vlm_file['media'] = model_utils.resolve_media_file_path(vlm_file.get('media'), args['prompt_file'][0])
383385
elif args['prompt_file'] is not None and len(args['prompt_file']) > 0 and 'video' in vlm_file:
384386
vlm_file['video'] = model_utils.resolve_media_file_path(vlm_file.get('video'), args['prompt_file'][0])

0 commit comments

Comments
 (0)