Skip to content

Commit 4039f3a

Browse files
authored
Merge pull request #225 from mlcommons/dev
Sync dev
2 parents 0e647d7 + 3124c4d commit 4039f3a

File tree

49 files changed

+355
-201
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

49 files changed

+355
-201
lines changed

Diff for: .github/workflows/build_wheel.yml

+2
Original file line numberDiff line numberDiff line change
@@ -3,13 +3,15 @@ name: Build wheel and release into PYPI
33
on:
44
release:
55
types: [published]
6+
67
push:
78
branches:
89
- dev
910
paths:
1011
- VERSION
1112

1213
jobs:
14+
1315
build_wheels:
1416
if: github.repository_owner == 'mlcommons'
1517
name: Build wheel

Diff for: .github/workflows/format.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,6 @@ jobs:
5959
git config --global user.name github-actions[bot]
6060
git config --global user.email "github-actions[bot]@users.noreply.github.com"
6161
# Commit changes
62-
git commit -m '[Automated Commit] Format Codebase'
62+
git commit -m '[Automated Commit] Format Codebase [skip ci]'
6363
git push
6464
fi

Diff for: .github/workflows/test-mlperf-inference-resnet50.yml

+2-2
Original file line numberDiff line numberDiff line change
@@ -53,11 +53,11 @@ jobs:
5353
- name: Test MLPerf Inference ResNet50 (Windows)
5454
if: matrix.os == 'windows-latest'
5555
run: |
56-
mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
56+
mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name="gh_${{ matrix.os }} x86" --model=resnet50 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
5757
- name: Test MLPerf Inference ResNet50 (Linux/macOS)
5858
if: matrix.os != 'windows-latest'
5959
run: |
60-
mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
60+
mlcr --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name="gh_${{ matrix.os }} x86" --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
6161
# Step for Linux/MacOS
6262
- name: Randomly Execute Step (Linux/MacOS)
6363
if: runner.os != 'Windows'

Diff for: .github/workflows/test-nvidia-mlperf-inference-implementations.yml

+11-5
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ name: MLPerf Inference Nvidia implementations
22

33
on:
44
schedule:
5-
- cron: "35 01 * * *"
5+
- cron: "27 11 * * *"
66

77
jobs:
88
run_nvidia:
@@ -23,8 +23,8 @@ jobs:
2323
model: [ "resnet50", "retinanet", "bert-99", "bert-99.9", "gptj-99.9", "3d-unet-99.9", "sdxl" ]
2424
exclude:
2525
- model: gptj-99.9
26-
- system: phoenix
27-
- system: GO-i9
26+
- system: phoenix1
27+
- system: GO-i91
2828

2929
steps:
3030
- name: Test MLPerf Inference NVIDIA ${{ matrix.model }}
@@ -45,6 +45,11 @@ jobs:
4545
gpu_name=rtx_4090
4646
docker_string=" --docker"
4747
fi
48+
if [ "${{ matrix.model }}" = "bert-99.9" ]; then
49+
submission_preprocessor_args=" --noinfer-low-accuracy-results"
50+
else
51+
submission_preprocessor_args=""
52+
fi
4853
category="datacenter,edge"
4954
if [ -f "gh_action/bin/deactivate" ]; then source gh_action/bin/deactivate; fi
5055
python3 -m venv gh_action
@@ -53,6 +58,7 @@ jobs:
5358
pip install --upgrade mlcflow
5459
mlc pull repo mlcommons@mlperf-automations --branch=dev
5560
56-
mlcr --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r5.0-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=$category --division=closed --docker_dt --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string --quiet
61+
mlcr --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r5.0-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="GATEOverflow" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=$category --division=closed --docker_dt --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string $submission_preprocessor_args --quiet
62+
#mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name
63+
mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/GATEOverflow/mlperf_inference_submissions_v5.0 --repo_branch=main --commit_message="Results from GH actions on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name
5764
58-
mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name

Diff for: .gitignore

+6-2
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,10 @@ wheels/
1515
.coverage
1616
htmlcov
1717
*tmp/
18-
*tmp-ck-*/
18+
tmp-*
1919
local/cache/
20-
20+
mlc-log.txt
21+
repos.json
22+
index_script.json
23+
index_cache.json
24+
index_experiment.json

Diff for: CONTRIBUTORS.md

+5-3
Original file line numberDiff line numberDiff line change
@@ -30,11 +30,13 @@ Once your contribution exceeds 50 lines of code (in total), we will:
3030
## Current Contributors
3131

3232
- **Grigori Fursin** - *Initial Development, CMind development to drive the automations, Added core automation features*
33-
- **Arjun Suresh** - *Initial Development, Added core automation features*
34-
- **Anandhu Sooraj** - *Added multiple CM scripts for MLPerf Inference*
33+
- **Arjun Suresh** - *Initial Development, Added core automation features, MLCFlow development to drive the automations*
34+
- **Anandhu Sooraj** - *Added multiple automation scripts for MLPerf Inference, MLCFlow development to drive the automations*
3535
- **Thomaz Zhu** - *Added CPP implementation for MLPerf Inference Onnxruntime*
3636
- **Sahil Avaran** - *Adding logging support in MLPerf script automation*
37-
- **[Your Name Here]** - This could be you! 🎉
37+
- **[Your Name Here]** - This could be you! 🎉
38+
39+
* CMind is now replaced by MLCFlow in the MLPerf Automations
3840

3941
---
4042

Diff for: README.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
Welcome to the **MLPerf Automations and Scripts** repository! This repository is your go-to resource for tools, automations, and scripts designed to streamline the execution of **MLPerf benchmarks**—with a strong emphasis on **MLPerf Inference benchmarks**.
1010

11-
Starting **January 2025**, MLPerf automation scripts are built on the powerful [MLCFlow](https://github.com/mlcommons/mlcflow) automation interface. This modern interface replaces the earlier [Collective Mind (CM)](https://github.com/mlcommons/ck/tree/master/cm), offering a more robust and efficient framework for benchmarking workflows.
11+
Starting **January 2025**, MLPerf automation scripts will be powered by the advanced [MLCFlow](https://github.com/mlcommons/mlcflow) automation interface. This modern framework replaces the previous [Collective Mind (CM)](https://github.com/mlcommons/ck/tree/master/cm), providing a more robust, efficient, and self-contained solution for benchmarking workflows, making MLPerf automations independent of any external projects.
1212

1313

1414
---

Diff for: VERSION

+1-1
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
0.0.9
1+
1.0.0

Diff for: automation/script/module.py

+3-9
Original file line numberDiff line numberDiff line change
@@ -2260,13 +2260,7 @@ def _update_env_from_input(self, env, i):
22602260

22612261
##########################################################################
22622262
def _fix_cache_paths(self, env):
2263-
'''
2264-
mlc_repos_path = os.environ.get(
2265-
'MLC_REPOS', os.path.join(
2266-
os.path.expanduser("~"), "CM", "repos"))
2267-
current_cache_path = os.path.realpath(
2268-
os.path.join(mlc_repos_path, "local", "cache"))
2269-
'''
2263+
22702264
current_cache_path = self.action_object.local_cache_path
22712265

22722266
new_env = env # just a reference
@@ -2285,7 +2279,7 @@ def _fix_cache_paths(self, env):
22852279
if loaded_cache_path != current_cache_path and os.path.exists(
22862280
current_cache_path):
22872281
new_env[key] = val.replace(
2288-
loaded_cache_path, current_cache_path)
2282+
loaded_cache_path, current_cache_path).replace(sep, "/")
22892283

22902284
elif isinstance(val, list):
22912285
for i, val2 in enumerate(val):
@@ -2300,7 +2294,7 @@ def _fix_cache_paths(self, env):
23002294
if loaded_cache_path != current_cache_path and os.path.exists(
23012295
current_cache_path):
23022296
new_env[key][i] = val2.replace(
2303-
loaded_cache_path, current_cache_path)
2297+
loaded_cache_path, current_cache_path).replace(sep, "/")
23042298

23052299
return {'return': 0, 'new_env': new_env}
23062300

Diff for: git_commit_hash.txt

+1-1
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
3dacfdc14894006f456d3b14d1b174e2e9e6e19f
1+
bee755068b7104201446663d41824053671a254d

Diff for: script/activate-python-venv/customize.py

+1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
from mlc import utils
2+
23
import os
34

45

Diff for: script/app-mlperf-inference-ctuning-cpp-tflite/COPYRIGHT.md

+4
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,10 @@
22

33
© 2024-2025 MLCommons. All Rights Reserved.
44

5+
Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
6+
7+
Copyright (c) 2014-2021 cTuning foundation
8+
59
This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at:
610

711
[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)

Diff for: script/app-mlperf-inference-ctuning-cpp-tflite/customize.py

-3
Original file line numberDiff line numberDiff line change
@@ -86,9 +86,6 @@ def preprocess(i):
8686
env['MLC_LINKER_LANG'] = 'CXX'
8787
env['MLC_RUN_DIR'] = os.getcwd()
8888

89-
if 'MLC_MLPERF_CONF' not in env:
90-
env['MLC_MLPERF_CONF'] = os.path.join(
91-
env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
9289
if 'MLC_MLPERF_USER_CONF' not in env:
9390
env['MLC_MLPERF_USER_CONF'] = os.path.join(
9491
env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf")

Diff for: script/app-mlperf-inference-ctuning-cpp-tflite/src/classification.cpp

-8
Original file line numberDiff line numberDiff line change
@@ -336,7 +336,6 @@ void TestSingleStream(Program *prg) {
336336
SystemUnderTestSingleStream sut(prg);
337337
QuerySampleLibrarySingleStream qsl(prg);
338338

339-
const std::string mlperf_conf_path = getenv_s("MLC_MLPERF_CONF");
340339
const std::string user_conf_path = getenv_s("MLC_MLPERF_USER_CONF");
341340
const std::string audit_conf_path =
342341
getenv_opt_s("MLC_MLPERF_INFERENCE_AUDIT_PATH", "");
@@ -347,7 +346,6 @@ void TestSingleStream(Program *prg) {
347346
const std::string scenario_string = getenv_s("MLC_MLPERF_LOADGEN_SCENARIO");
348347
const std::string mode_string = getenv_s("MLC_MLPERF_LOADGEN_MODE");
349348

350-
std::cout << "Path to mlperf.conf : " << mlperf_conf_path << std::endl;
351349
std::cout << "Path to user.conf : " << user_conf_path << std::endl;
352350
std::cout << "Model Name: " << model_name << std::endl;
353351
std::cout << "LoadGen Scenario: " << scenario_string << std::endl;
@@ -374,12 +372,6 @@ void TestSingleStream(Program *prg) {
374372
? mlperf::TestMode::FindPeakPerformance
375373
: mlperf::TestMode::SubmissionRun;
376374

377-
if (ts.FromConfig(mlperf_conf_path, model_name, scenario_string)) {
378-
std::cout << "Issue with mlperf.conf file at " << mlperf_conf_path
379-
<< std::endl;
380-
exit(1);
381-
}
382-
383375
if (ts.FromConfig(user_conf_path, model_name, scenario_string)) {
384376
std::cout << "Issue with user.conf file at " << user_conf_path << std::endl;
385377
exit(1);

Diff for: script/app-mlperf-inference-mlcommons-cpp/customize.py

-3
Original file line numberDiff line numberDiff line change
@@ -91,9 +91,6 @@ def preprocess(i):
9191
env['MLC_LINKER_LANG'] = 'CXX'
9292
env['MLC_RUN_DIR'] = os.getcwd()
9393

94-
if 'MLC_MLPERF_CONF' not in env:
95-
env['MLC_MLPERF_CONF'] = os.path.join(
96-
env['MLC_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
9794
if 'MLC_MLPERF_USER_CONF' not in env:
9895
env['MLC_MLPERF_USER_CONF'] = os.path.join(
9996
env['MLC_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf")

Diff for: script/app-mlperf-inference-mlcommons-cpp/src/main.cpp

-11
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,6 @@ class InputSettings {
2323

2424
public:
2525
InputSettings() {
26-
mlperf_conf_path = getenv("MLC_MLPERF_CONF", "../inference/mlperf.conf");
2726
user_conf_path =
2827
getenv("MLC_MLPERF_USER_CONF",
2928
"../inference/vision/classification_and_detection/user.conf");
@@ -49,7 +48,6 @@ class InputSettings {
4948
performance_sample_count =
5049
std::stol(getenv("MLC_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT", "0"));
5150
batch_size = std::stol(getenv("MLC_MLPERF_LOADGEN_MAX_BATCHSIZE", "32"));
52-
std::cout << "MLPerf Conf path: " << mlperf_conf_path << std::endl;
5351
std::cout << "User Conf path: " << user_conf_path << std::endl;
5452
std::cout << "Dataset Preprocessed path: " << dataset_preprocessed_path
5553
<< std::endl;
@@ -62,7 +60,6 @@ class InputSettings {
6260
<< performance_sample_count << std::endl;
6361
}
6462

65-
std::string mlperf_conf_path;
6663
std::string user_conf_path;
6764
std::string audit_conf_path;
6865
std::string output_dir;
@@ -104,14 +101,6 @@ int main(int argc, const char *argv[]) {
104101
? mlperf::TestMode::FindPeakPerformance
105102
: mlperf::TestMode::SubmissionRun;
106103

107-
// read test settings from mlperf.conf and user.conf
108-
if (test_settings.FromConfig(input_settings.mlperf_conf_path,
109-
input_settings.model_name,
110-
input_settings.scenario_name)) {
111-
std::cerr << "Could not read mlperf.conf at "
112-
<< input_settings.mlperf_conf_path << std::endl;
113-
return 1;
114-
}
115104
if (test_settings.FromConfig(input_settings.user_conf_path,
116105
input_settings.model_name,
117106
input_settings.scenario_name)) {

Diff for: script/app-mlperf-inference-mlcommons-python/customize.py

+11-7
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
import json
44
import shutil
55
import subprocess
6+
from utils import *
67

78

89
def preprocess(i):
@@ -51,12 +52,14 @@ def preprocess(i):
5152

5253
env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['MLC_MLPERF_LOADGEN_QPS_OPT']
5354

54-
if 'MLC_NUM_THREADS' not in env:
55-
if 'MLC_MINIMIZE_THREADS' in env:
55+
if env.get('MLC_NUM_THREADS', '') == '':
56+
if is_true(env.get('MLC_MINIMIZE_THREADS', '')) and env.get(
57+
'MLC_HOST_CPU_TOTAL_CORES', '') != '':
5658
env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) //
57-
(int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1'))))
59+
(int(env.get('MLC_HOST_CPU_SOCKETS', '1'))))
5860
else:
5961
env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1')
62+
env['CM_NUM_THREADS'] = env['MLC_NUM_THREADS'] # For inference code
6063

6164
if env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and str(env.get(
6265
'MLC_MLPERF_MODEL_SKIP_BATCHING', False)).lower() not in ["true", "1", "yes"]:
@@ -250,9 +253,9 @@ def get_run_cmd_reference(
250253
'MLC_MLPERF_DEVICE') != "tpu":
251254
if os_info['platform'] == 'windows':
252255
cmd = "python python/main.py --profile " + env['MLC_MODEL'] + "-" + env['MLC_MLPERF_BACKEND'] + \
253-
" --model=" + env['MLC_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['MLC_DATASET_PREPROCESSED_PATH'] + \
254-
" --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + \
255-
" --output " + env['OUTPUT_DIR'] + " " + \
256+
" --model=\"" + env['MLC_ML_MODEL_FILE_WITH_PATH'] + '" --dataset-path="' + env['MLC_DATASET_PREPROCESSED_PATH'] + \
257+
"\" --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + " " + \
258+
" --output \"" + env['OUTPUT_DIR'] + "\" " + \
256259
env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
257260
scenario_extra_options + mode_extra_options + dataset_options
258261
else:
@@ -270,12 +273,13 @@ def get_run_cmd_reference(
270273
env['MODEL_FILE'] = env.get(
271274
'MLC_MLPERF_CUSTOM_MODEL_PATH',
272275
env.get('MLC_ML_MODEL_FILE_WITH_PATH'))
276+
273277
if not env['MODEL_FILE']:
274278
return {'return': 1, 'error': 'No valid model file found!'}
275279

276280
env['LOG_PATH'] = env['MLC_MLPERF_OUTPUT_DIR']
277281

278-
extra_options = " --output " + env['MLC_MLPERF_OUTPUT_DIR'] + " --model-name resnet50 --dataset " + env['MLC_MLPERF_VISION_DATASET_OPTION'] + ' --max-batchsize ' + env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \
282+
extra_options = " --output " + env['MLC_MLPERF_OUTPUT_DIR'] + " --model-name resnet50 --dataset " + env['MLC_MLPERF_VISION_DATASET_OPTION'] + f""" --max-batchsize {env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '1')}""" + \
279283
" --dataset-path " + env['MLC_DATASET_PREPROCESSED_PATH'] + " --model " + env['MODEL_FILE'] + \
280284
" --preprocessed_dir " + env['MLC_DATASET_PREPROCESSED_PATH']
281285

Diff for: script/app-mlperf-inference-mlcommons-python/meta.yaml

+15-7
Original file line numberDiff line numberDiff line change
@@ -888,6 +888,14 @@ variations:
888888
ml-model:
889889
tags: raw,_deepsparse
890890

891+
deepsparse,resnet50:
892+
default_env:
893+
DEEPSPARSE_NUM_STREAMS: 24
894+
ENQUEUE_NUM_THREADS: 2
895+
MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 16
896+
MLC_MLPERF_VISION_DATASET_OPTION: imagenet_pytorch
897+
898+
891899
tvm-onnx:
892900
group: framework
893901
env:
@@ -1300,9 +1308,14 @@ variations:
13001308
- tags: get,generic-python-lib,_package.torchdata
13011309
- tags: get,generic-python-lib,_package.pybind11
13021310
- tags: get,generic-python-lib,_package.PyYAML
1311+
- tags: get,generic-python-lib,_package.numpy
1312+
version_min: "2.0.2"
1313+
names:
1314+
- numpy-upgrade
13031315
- tags: get,generic-python-lib,_package.numpy
13041316
version_max: "1.26.4"
1305-
version_max_usable: "1.26.4"
1317+
names:
1318+
- numpy-downgrade
13061319
- tags: get,generic-python-lib,_package.pydantic
13071320
- tags: get,generic-python-lib,_package.igb,_url.git+https://github.com/IllinoisGraphBenchmark/IGB-Datasets.git
13081321
- tags: get,generic-python-lib,_package.torch-geometric
@@ -1347,11 +1360,7 @@ variations:
13471360
- tags: get,generic-python-lib,_package.numba
13481361
- tags: get,generic-python-lib,_package.open3d
13491362
- tags: get,generic-python-lib,_package.numpy
1350-
version_max: "1.26.4"
1351-
names:
1352-
- numpy
1353-
- tags: get,generic-python-lib,_package.numpy
1354-
version_max: "2.0.2"
1363+
version_min: "2.0.2"
13551364
names:
13561365
- numpy-upgrade
13571366
- tags: get,generic-python-lib,_package.numpy
@@ -1363,7 +1372,6 @@ variations:
13631372
- tags: get,generic-python-lib,_package.opencv-python
13641373
- tags: get,generic-python-lib,_package.scikit-image
13651374
- tags: get,generic-python-lib,_package.scipy
1366-
version_max: "1.11.2"
13671375
names:
13681376
- scipy
13691377
- tags: get,generic-python-lib,_package.ninja

Diff for: script/app-mlperf-inference/customize.py

+2
Original file line numberDiff line numberDiff line change
@@ -311,6 +311,8 @@ def postprocess(i):
311311
# if custom model name is not set, the official model name will be
312312
# mapped to itself
313313
official_model_name = model
314+
if "efficientnet" in official_model_name or "mobilenet" in official_model_name:
315+
official_model_name = "resnet"
314316
model_mapping = {model_full_name: official_model_name}
315317
with open("model_mapping.json", "w") as fp:
316318
json.dump(model_mapping, fp, indent=2)

0 commit comments

Comments
 (0)