@@ -334,8 +334,10 @@ def run_visual_language_generation_benchmark(
     log.info(f"Numbeams: {args['num_beams']}, benchmarking iter nums(exclude warm-up): {num_iters}, "
              f'prompt nums: {len(image_text_list)}, prompt idx: {prompt_idx_list}')
 
-    if use_genai: gen_fn = run_visual_language_generation_genai
-    else: gen_fn = run_visual_language_generation_optimum
+    if use_genai:
+        gen_fn = run_visual_language_generation_genai
+    else:
+        gen_fn = run_visual_language_generation_optimum
 
     proc_id = os.getpid()
     iter_timestamp = model_utils.init_timestamp(num_iters, image_text_list, prompt_idx_list)
@@ -350,8 +352,8 @@ def run_visual_language_generation_benchmark(
                 input_text, num, model, processor, args, iter_data_list, md5_list,
                 p_idx, bench_hook, model_precision, proc_id, mem_consumption, required_frames)
             iter_timestamp[num][p_idx]['end'] = datetime.datetime.now().isoformat()
-            prefix = '[warm-up]' if num == 0 else '[{}]'.format(num)
-            log.info(f"{prefix}[P{p_idx}] start: {iter_timestamp[num][p_idx]['start']}, end: {iter_timestamp[num][p_idx]['end']}")
+            prefix = f"[warm-up][P{p_idx}]" if num == 0 else f"[{num}][P{p_idx}]"
+            log.info(f"{prefix} start: {iter_timestamp[num][p_idx]['start']}, end: {iter_timestamp[num][p_idx]['end']}")
         else:
             for idx, input_text in enumerate(image_text_list):
                 p_idx = prompt_idx_list[idx]
@@ -363,8 +365,8 @@ def run_visual_language_generation_benchmark(
                     input_text, num, model, processor, args, iter_data_list, md5_list, prompt_idx_list[idx],
                     bench_hook, model_precision, proc_id, mem_consumption, required_frames)
                 iter_timestamp[num][p_idx]['end'] = datetime.datetime.now().isoformat()
-                prefix = '[warm-up]' if num == 0 else '[{}]'.format(num)
-                log.info(f"{prefix}[P{p_idx}] start: {iter_timestamp[num][p_idx]['start']}, end: {iter_timestamp[num][p_idx]['end']}")
+                prefix = f"[warm-up][P{p_idx}]" if num == 0 else f"[{num}][P{p_idx}]"
+                log.info(f"{prefix} start: {iter_timestamp[num][p_idx]['start']}, end: {iter_timestamp[num][p_idx]['end']}")
 
     metrics_print.print_average(iter_data_list, prompt_idx_list, args['batch_size'], True)
     return iter_data_list, pretrain_time, iter_timestamp
@@ -378,7 +380,7 @@ def get_image_text_prompt(args):
     if len(vlm_param_list) > 0:
         for vlm_file in vlm_param_list:
             if args['prompt_file'] is not None and len(args['prompt_file']) > 0 and 'media' in vlm_file:
-                if 'video' in vlm_file: log.warning('media and video cannot be specify in a single prompt file')
+                if 'video' in vlm_file: raise ValueError('media and video cannot be specify in a single prompt file')
                 vlm_file['media'] = model_utils.resolve_media_file_path(vlm_file.get('media'), args['prompt_file'][0])
             elif args['prompt_file'] is not None and len(args['prompt_file']) > 0 and 'video' in vlm_file:
                 vlm_file['video'] = model_utils.resolve_media_file_path(vlm_file.get('video'), args['prompt_file'][0])
0 commit comments