Skip to content

Commit 00a103d

Browse files
authored
GaudiTrainer: Take accuracy measurement outside of timer (#2331)
Signed-off-by: Urszula <urszula.golowicz@intel.com>
1 parent 5117a3f commit 00a103d

21 files changed

+66
-4
lines changed

optimum/habana/diffusers/pipelines/controlnet/pipeline_controlnet.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -634,11 +634,13 @@ def __call__(
634634
self.htcore.mark_step()
635635

636636
hb_profiler.stop()
637+
end_time = time.time()
637638

638639
speed_metrics_prefix = "generation"
639640
speed_measures = speed_metrics(
640641
split=speed_metrics_prefix,
641642
start_time=t0,
643+
end_time=end_time,
642644
num_samples=num_batches * batch_size
643645
if t1 == t0 or use_warmup_inference_steps
644646
else (num_batches - throughput_warmup_steps) * batch_size,

optimum/habana/diffusers/pipelines/controlnet/pipeline_stable_video_diffusion_controlnet.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -534,10 +534,13 @@ def __call__(
534534

535535
outputs["frames"].append(frames)
536536

537+
end_time = time.time()
538+
537539
speed_metrics_prefix = "generation"
538540
speed_measures = speed_metrics(
539541
split=speed_metrics_prefix,
540542
start_time=t0,
543+
end_time=end_time,
541544
num_samples=num_batches * batch_size
542545
if t1 == t0
543546
else (num_batches - throughput_warmup_steps) * batch_size,

optimum/habana/diffusers/pipelines/ddpm/pipeline_ddpm.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -192,6 +192,8 @@ def __call__(
192192
if not self.use_hpu_graphs:  # For checking output results
193193
self.htcore.mark_step()
194194

195+
end_time = time.time()
196+
195197
if self.gaudi_config.use_torch_autocast:
196198
image = image.float()
197199

@@ -204,6 +206,7 @@ def __call__(
204206
speed_measures = speed_metrics(
205207
split=speed_metrics_prefix,
206208
start_time=start_time,
209+
end_time=end_time,
207210
num_samples=batch_size,
208211
num_steps=batch_size * len(num_inference_steps),
209212
start_time_after_warmup=time_after_warmup,

optimum/habana/diffusers/pipelines/flux/pipeline_flux.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -583,12 +583,15 @@ def __call__(
583583
finalize_calibration(self.transformer)
584584

585585
ht.hpu.synchronize()
586+
end_time = time.time()
587+
586588
speed_metrics_prefix = "generation"
587589
if use_warmup_inference_steps:
588590
t1 = warmup_inference_steps_time_adjustment(t1, t1, num_inference_steps, throughput_warmup_steps)
589591
speed_measures = speed_metrics(
590592
split=speed_metrics_prefix,
591593
start_time=t0,
594+
end_time=end_time,
592595
num_samples=batch_size
593596
if t1 == t0 or use_warmup_inference_steps
594597
else (num_batches - throughput_warmup_steps) * batch_size,

optimum/habana/diffusers/pipelines/flux/pipeline_flux_img2img.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -615,12 +615,15 @@ def __call__(
615615
finalize_calibration(self.transformer)
616616

617617
ht.hpu.synchronize()
618+
end_time = time.time()
619+
618620
speed_metrics_prefix = "generation"
619621
if use_warmup_inference_steps:
620622
t1 = warmup_inference_steps_time_adjustment(t1, t1, num_inference_steps, throughput_warmup_steps)
621623
speed_measures = speed_metrics(
622624
split=speed_metrics_prefix,
623625
start_time=t0,
626+
end_time=end_time,
624627
num_samples=batch_size
625628
if t1 == t0 or use_warmup_inference_steps
626629
else (num_batches - throughput_warmup_steps) * batch_size,

optimum/habana/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -624,11 +624,14 @@ def __call__(
624624
if not self.use_hpu_graphs:
625625
self.htcore.mark_step()
626626

627+
end_time = time.time()
627628
hb_profiler.stop()
629+
628630
speed_metrics_prefix = "generation"
629631
speed_measures = speed_metrics(
630632
split=speed_metrics_prefix,
631633
start_time=t0,
634+
end_time=end_time,
632635
num_samples=num_batches * batch_size
633636
if t1 == t0 or use_warmup_inference_steps
634637
else (num_batches - throughput_warmup_steps) * batch_size,

optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -596,6 +596,7 @@ def __call__(
596596
self.htcore.mark_step()
597597

598598
hb_profiler.stop()
599+
end_time = time.time()
599600

600601
speed_metrics_prefix = "generation"
601602
if t1 == t0 or use_warmup_inference_steps:
@@ -608,6 +609,7 @@ def __call__(
608609
speed_measures = speed_metrics(
609610
split=speed_metrics_prefix,
610611
start_time=t0,
612+
end_time=end_time,
611613
num_samples=num_samples,
612614
num_steps=num_steps,
613615
start_time_after_warmup=t1,

optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -372,11 +372,14 @@ def __call__(
372372
if not self.use_hpu_graphs:
373373
self.htcore.mark_step()
374374

375+
end_time = time.time()
375376
hb_profiler.stop()
377+
376378
speed_metrics_prefix = "generation"
377379
speed_measures = speed_metrics(
378380
split=speed_metrics_prefix,
379381
start_time=t0,
382+
end_time=end_time,
380383
num_samples=num_batches * batch_size
381384
if t1 == t0 or use_warmup_inference_steps
382385
else (num_batches - throughput_warmup_steps) * batch_size,

optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -607,12 +607,14 @@ def __call__(
607607

608608
outputs["images"].append(image)
609609

610+
end_time = time.time()
610611
hb_profiler.stop()
611612

612613
speed_metrics_prefix = "generation"
613614
speed_measures = speed_metrics(
614615
split=speed_metrics_prefix,
615616
start_time=t0,
617+
end_time=end_time,
616618
num_samples=num_batches * batch_size
617619
if t1 == t0 or use_warmup_inference_steps
618620
else (num_batches - throughput_warmup_steps) * batch_size,

optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -708,6 +708,8 @@ def __call__(
708708
if not self.use_hpu_graphs:
709709
self.htcore.mark_step()
710710

711+
end_time = time.time()
712+
711713
# Remove dummy generations if needed
712714
if num_dummy_samples > 0:
713715
outputs["images"][-1] = outputs["images"][-1][:-num_dummy_samples]
@@ -716,6 +718,7 @@ def __call__(
716718
speed_measures = speed_metrics(
717719
split=speed_metrics_prefix,
718720
start_time=t0,
721+
end_time=end_time,
719722
num_samples=num_batches * batch_size
720723
if t1 == t0 or use_warmup_inference_steps
721724
else (num_batches - throughput_warmup_steps) * batch_size,

0 commit comments

Comments (0)