Commit c94a810

Merge branch 'open-edge-platform:main' into rm_datastore
2 parents: fe5263e + 0908813

File tree: 9 files changed (+530 −179 lines)


.github/workflows/dls-build-docker-images.yaml

Lines changed: 2 additions & 2 deletions

@@ -27,10 +27,10 @@ jobs:
     image: one-binary-ubuntu24
     steps:
       - name: Check out edge-ai-libraries repository
-        uses: actions/checkout@v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #4.2.2

       - name: Log in to GitHub Container Registry
-        uses: docker/login-action@v3.4.0
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 #3.4.0
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
libraries/dl-streamer/scripts/build_deb_packages.sh

Lines changed: 1 addition & 1 deletion

@@ -44,7 +44,7 @@ for i in "$@"; do
         ;;
     --ubuntu_version=*)
         ubuntu_version="${i#*=}"
-        if [[ "$MODEL" != "ubuntu22" ]] && [[ "$MODEL" != "ubuntu24" ]]; then
+        if [[ "$ubuntu_version" != "ubuntu22" ]] && [[ "$ubuntu_version" != "ubuntu24" ]]; then
            echo "Error! Wrong Ubuntu version parameter. Supported versions: ubuntu22 | ubuntu24"
            exit 1
        fi

(The old condition tested $MODEL, a variable unrelated to this option, so the version check never rejected bad --ubuntu_version values; it now validates the freshly parsed $ubuntu_version.)

tools/visual-pipeline-and-platform-evaluation-tool/Dockerfile.vippet

Lines changed: 1 addition & 1 deletion

@@ -32,6 +32,6 @@ RUN pip install -r requirements.txt

 ADD diagrams/ /home/dlstreamer/vippet/diagrams

-ADD app.py collect.py optimize.py pipeline.py device.py explore.py /home/dlstreamer/vippet/
+ADD app.py collect.py optimize.py pipeline.py device.py explore.py benchmark.py utils.py /home/dlstreamer/vippet/

 CMD ["python", "app.py"]

tools/visual-pipeline-and-platform-evaluation-tool/app.py

File mode changed: 100644 → 100755
Lines changed: 127 additions & 54 deletions
@@ -4,6 +4,7 @@
 import time
 import math
 import requests
+import logging

 import gradio as gr
 import matplotlib.pyplot as plt
@@ -13,8 +14,13 @@
 from collect import CollectionReport, MetricsCollectorFactory
 from optimize import OptimizationResult, PipelineOptimizer
 from pipeline import SmartNVRPipeline, Transportation2Pipeline
+
 from device import DeviceDiscovery
 from explore import GstInspector
+from benchmark import Benchmark
+from utils import prepare_video_and_constants
+
+logging.getLogger("httpx").setLevel(logging.WARNING)

 css_code = """
@@ -370,6 +376,15 @@ def create_interface():
         show_fullscreen_button=False,
     )

+    # Textbox to display the best configuration
+    best_config_textbox = gr.Textbox(
+        label="Best Configuration",
+        interactive=False,
+        lines=2,
+        placeholder="The best configuration will appear here after benchmarking.",
+        visible=True,
+    )
+
     # Pipeline parameters accordion
     pipeline_parameters_accordion = gr.Accordion("Pipeline Parameters", open=True)

@@ -392,6 +407,13 @@ def create_interface():
         label="Number of Recording only channels",
         interactive=True,
     )
+    # FPS floor
+    fps_floor = gr.Number(
+        label="Set FPS Floor",
+        value=30.0,  # Default value
+        minimum=1.0,
+        interactive=True,
+    )

     # Object detection accordion
     object_detection_accordion = gr.Accordion("Object Detection Parameters", open=True)
@@ -422,6 +444,35 @@
         value=preferred_device,
     )

+    # Batch size
+    batch_size = gr.Slider(
+        minimum=0,
+        maximum=1024,
+        value=0,
+        step=1,
+        label="Batch Size",
+        interactive=True,
+    )
+
+    # Inference interval
+    inference_interval = gr.Slider(
+        minimum=1,
+        maximum=1800,
+        value=1,
+        step=1,
+        label="Inference Interval",
+        interactive=True,
+    )
+
+    # Number of inference requests (nireq)
+    nireq = gr.Slider(
+        minimum=0,
+        maximum=1024,
+        value=0,
+        step=1,
+        label="Number of Inference Requests (nireq)",
+        interactive=True,
+    )
     # These elements are not used in the current version of the app
     # # Object classification accordion
     # object_classification_accordion = gr.Accordion(
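
Note: these three sliders correspond by name to the batch-size, nireq, and inference-interval properties that DL Streamer's gvadetect element exposes, where 0 for batch-size/nireq conventionally means "let the runtime choose". The diff does not show where the values land, so the mapping below is an illustrative assumption only:

def detect_fragment(model_xml, device, batch_size=0, nireq=0, inference_interval=1):
    # Hypothetical sketch: build the gvadetect fragment these UI values
    # would typically feed. batch-size=0 and nireq=0 leave the choice to
    # the inference runtime.
    return (
        f"gvadetect model={model_xml} device={device} "
        f"batch-size={batch_size} nireq={nireq} "
        f"inference-interval={inference_interval}"
    )

print(detect_fragment("yolov5s.xml", "GPU"))
# gvadetect model=yolov5s.xml device=GPU batch-size=0 nireq=0 inference-interval=1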
@@ -456,6 +507,9 @@
     # Run button
     run_button = gr.Button("Run")

+    # Add a Benchmark button
+    benchmark_button = gr.Button("Benchmark")
+
     # Interface layout
     with gr.Blocks(theme=theme, css=css_code) as demo:

@@ -478,7 +532,9 @@
                 [object_detection_accordion],
             )
             run_button.render()
+            benchmark_button.render()
             #results_plot.render()
+            best_config_textbox.render()
             cpu_metrics_plot.render()

             gpu_time_series_plot.render()
@@ -491,61 +547,20 @@ def on_run(
         # These elements are not used in the current version of the app
         # object_classification_model,
         # object_classification_device,
+        batch_size,
+        inference_interval,
+        nireq,
         input_video_player,
     ):
-
-        random_string = "".join(
-            random.choices(string.ascii_lowercase + string.digits, k=6)
-        )
-        video_output_path = input_video_player.replace(
-            ".mp4", f"-output-{random_string}.mp4"
-        )
-        # Delete the video in the output folder before producing a new one
-        # Otherwise, gstreamer will just save a few seconds of the video
-        # and stop.
-        if os.path.exists(video_output_path):
-            os.remove(video_output_path)
-
-        param_grid = {
-            "object_detection_device": object_detection_device.split(", "),
-            # These elements are not used in the current version of the app
-            # "vehicle_classification_device": object_classification_device.split(
-            #     ", "
-            # ),
-        }
-
-        constants = {
-            "VIDEO_PATH": input_video_player,
-            "VIDEO_OUTPUT_PATH": video_output_path,
-        }
-
-        MODELS_PATH = "/home/dlstreamer/vippet/models"
-
-        match object_detection_model:
-            case "SSDLite MobileNet V2":
-                constants["OBJECT_DETECTION_MODEL_PATH"] = (
-                    f"{MODELS_PATH}/pipeline-zoo-models/ssdlite_mobilenet_v2_INT8/FP16-INT8/ssdlite_mobilenet_v2.xml"
-                )
-                constants["OBJECT_DETECTION_MODEL_PROC"] = (
-                    f"{MODELS_PATH}/pipeline-zoo-models/ssdlite_mobilenet_v2_INT8/ssdlite_mobilenet_v2.json"
-                )
-            case "YOLO v5m":
-                constants["OBJECT_DETECTION_MODEL_PATH"] = (
-                    f"{MODELS_PATH}/pipeline-zoo-models/yolov5m-416_INT8/FP16-INT8/yolov5m-416_INT8.xml"
-                )
-                constants["OBJECT_DETECTION_MODEL_PROC"] = (
-                    f"{MODELS_PATH}/pipeline-zoo-models/yolov5m-416_INT8/yolo-v5.json"
-                )
-            case "YOLO v5s":
-                constants["OBJECT_DETECTION_MODEL_PATH"] = (
-                    f"{MODELS_PATH}/pipeline-zoo-models/yolov5s-416_INT8/FP16-INT8/yolov5s.xml"
-                )
-                constants["OBJECT_DETECTION_MODEL_PROC"] = (
-                    f"{MODELS_PATH}/pipeline-zoo-models/yolov5s-416_INT8/yolo-v5.json"
-                )
-            case _:
-                raise ValueError("Unrecognized Object Detection Model")
-
+        video_output_path, constants, param_grid = prepare_video_and_constants(
+            input_video_player,
+            object_detection_model,
+            object_detection_device,
+            batch_size,
+            nireq,
+            inference_interval,
+        )
+
         # These elements are not used in the current version of the app
         # match object_classification_model:
         #     case "ResNet-50 TF":
@@ -592,6 +607,42 @@ def on_run(
         gpu_plot = generate_gpu_time_series(report)
         return [video_output_path, cpu_plot, gpu_plot]

+    def on_benchmark(
+        fps_floor,
+        object_detection_model,
+        object_detection_device,
+        batch_size,
+        inference_interval,
+        nireq,
+        input_video_player,
+    ):
+
+        _, constants, param_grid = prepare_video_and_constants(
+            input_video_player,
+            object_detection_model,
+            object_detection_device,
+            batch_size,
+            nireq,
+            inference_interval,
+        )
+
+        # Initialize the benchmark class
+        bm = Benchmark(
+            video_path=input_video_player,
+            pipeline_cls=pipeline,
+            fps_floor=fps_floor,
+            parameters=param_grid,
+            constants=constants,
+            elements=gst_inspector.get_elements(),
+        )
+
+        # Run the benchmark
+        s, ai, non_ai, fps = bm.run()
+
+        # Return results
+        return f"Best Config: {s} streams ({ai} AI, {non_ai} non-AI -> {fps:.2f} FPS)"
+
+
     input_video_player.change(
         lambda v: (
             (
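
benchmark.py is also added by this commit but its body is not visible here. The skeleton below matches only the interface used in on_benchmark (the constructor keywords and run() returning a (streams, ai_streams, non_ai_streams, fps) tuple); the linear search and the injected measure callback are assumptions for illustration, not the shipped algorithm.

class Benchmark:
    def __init__(self, video_path, pipeline_cls, fps_floor, parameters,
                 constants, elements, measure=None):
        self.video_path = video_path
        self.pipeline_cls = pipeline_cls
        self.fps_floor = fps_floor
        self.parameters = parameters
        self.constants = constants
        self.elements = elements
        # measure(streams) -> per-stream FPS; injected so this sketch stays
        # self-contained instead of launching real pipelines.
        self.measure = measure or (lambda streams: 120.0 / streams)

    def run(self):
        # Raise the stream count until per-stream FPS falls below the floor,
        # then report the last configuration that still met it.
        best = (0, 0, 0, 0.0)
        streams = 1
        while True:
            fps = self.measure(streams)
            if fps < self.fps_floor:
                return best
            ai = (streams + 1) // 2  # assumed AI/non-AI split
            best = (streams, ai, streams - ai, fps)
            streams += 1

print(Benchmark("video.mp4", None, 30.0, {}, {}, []).run())
# -> (4, 2, 2, 30.0) with the demo measure above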
@@ -621,16 +672,34 @@ def on_run(
             # These elements are not used in the current version of the app
             # object_classification_model,
             # object_classification_device,
+            batch_size,
+            inference_interval,
+            nireq,
             input_video_player,
         ],
         outputs=[output_video_player, cpu_metrics_plot, gpu_time_series_plot],
     ).then(
-        fn=lambda video: gr.update(
+        fn=lambda: gr.update(
             interactive=True
         ),  # Re-enable Run button
         outputs=[run_button],
     )

+
+    benchmark_button.click(
+        on_benchmark,
+        inputs=[
+            fps_floor,
+            object_detection_model,
+            object_detection_device,
+            batch_size,
+            inference_interval,
+            nireq,
+            input_video_player,
+        ],
+        outputs=[best_config_textbox],
+    )
+
     with gr.Column(scale=1, min_width=150):
         with gr.Accordion("Video Player", open=True):
             input_video_player.render()
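
For readers new to Gradio, the wiring above follows the standard pattern: Button.click(fn, inputs, outputs) calls fn with the current component values and writes its return value into the output component. A self-contained toy version (component names and the canned result are illustrative, not from the app):

import gradio as gr

def fake_benchmark(fps_floor):
    # Stand-in for on_benchmark: returns a canned summary string.
    return f"Best Config: 4 streams (2 AI, 2 non-AI -> {fps_floor:.2f} FPS)"

with gr.Blocks() as demo:
    floor = gr.Number(label="Set FPS Floor", value=30.0)
    box = gr.Textbox(label="Best Configuration", interactive=False)
    btn = gr.Button("Benchmark")
    btn.click(fake_benchmark, inputs=[floor], outputs=[box])

if __name__ == "__main__":
    demo.launch()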
@@ -639,10 +708,14 @@ def on_run(
         with pipeline_parameters_accordion.render():
             inferencing_channels.render()
             recording_channels.render()
+            fps_floor.render()

         with object_detection_accordion.render():
             object_detection_model.render()
             object_detection_device.render()
+            batch_size.render()
+            inference_interval.render()
+            nireq.render()

         # These elements are not used in the current version of the app
         # with object_classification_accordion.render():
