diff --git a/examples/__init__.py b/examples/__init__.py index 22e663ef899..92618e42e43 100644 --- a/examples/__init__.py +++ b/examples/__init__.py @@ -8,3 +8,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +from re import compile +from subprocess import check_output # nosec B404: used only for executing benchmark_app + +throughput_pattern = compile(r"Throughput\: (.+?) FPS") + + +def execute_benchmark_on_cpu(model_path, time, shape=None): + command = ["benchmark_app", "-m", model_path.as_posix(), "-d", "CPU", "-api", "async", "-t", str(time)] + if shape is not None: + command += ["-shape", str(shape)] + + cmd_output = check_output(command, text=True) # nosec B603: used only for executing benchmark_app + print(*cmd_output.splitlines()[-8:], sep="\n") + + match = throughput_pattern.search(cmd_output) + return float(match.group(1)) diff --git a/examples/post_training_quantization/onnx/mobilenet_v2/main.py b/examples/post_training_quantization/onnx/mobilenet_v2/main.py index ff6bfab4efd..ff167c54ced 100755 --- a/examples/post_training_quantization/onnx/mobilenet_v2/main.py +++ b/examples/post_training_quantization/onnx/mobilenet_v2/main.py @@ -9,8 +9,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import re -import subprocess from pathlib import Path import numpy as np @@ -25,6 +23,7 @@ from torchvision import transforms import nncf +from examples import execute_benchmark_on_cpu ROOT = Path(__file__).parent.resolve() MODEL_URL = "https://huggingface.co/alexsu52/mobilenet_v2_imagenette/resolve/main/mobilenet_v2_imagenette.onnx" @@ -61,21 +60,6 @@ def validate(path_to_model: Path, validation_loader: torch.utils.data.DataLoader return accuracy_score(predictions, references) -def run_benchmark(path_to_model: Path, shape: list[int]) -> float: - command = [ - "benchmark_app", - "-m", path_to_model.as_posix(), - "-d", "CPU", - "-api", "async", - "-t", "15", - "-shape", str(shape), - ] # fmt: skip - cmd_output = subprocess.check_output(command, text=True) # nosec - print(*cmd_output.splitlines()[-8:], sep="\n") - match = re.search(r"Throughput\: (.+?) FPS", str(cmd_output)) - return float(match.group(1)) - - def get_model_size(path: Path, m_type: str = "Mb") -> float: model_size = path.stat().st_size for t in ["bytes", "Kb", "Mb"]: @@ -152,9 +136,9 @@ def transform_fn(data_item): int8_model_size = get_model_size(int8_model_path) print("[3/7] Benchmark FP32 model:") -fp32_fps = run_benchmark(fp32_model_path, shape=[1, 3, 224, 224]) +fp32_fps = execute_benchmark_on_cpu(fp32_model_path, time=15, shape=[1, 3, 224, 224]) print("[4/7] Benchmark INT8 model:") -int8_fps = run_benchmark(int8_model_path, shape=[1, 3, 224, 224]) +int8_fps = execute_benchmark_on_cpu(int8_model_path, time=15, shape=[1, 3, 224, 224]) print("[5/7] Validate ONNX FP32 model in OpenVINO:") fp32_top1 = validate(fp32_model_path, val_loader) diff --git a/examples/post_training_quantization/onnx/yolov8_quantize_with_accuracy_control/deploy.py b/examples/post_training_quantization/onnx/yolov8_quantize_with_accuracy_control/deploy.py index a6dbd582df5..812cfe7075a 100644 --- a/examples/post_training_quantization/onnx/yolov8_quantize_with_accuracy_control/deploy.py +++ 
b/examples/post_training_quantization/onnx/yolov8_quantize_with_accuracy_control/deploy.py @@ -9,8 +9,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import re -import subprocess from pathlib import Path import openvino as ov @@ -22,6 +20,7 @@ from ultralytics.utils import DEFAULT_CFG from ultralytics.utils.metrics import ConfusionMatrix +from examples import execute_benchmark_on_cpu from examples.post_training_quantization.onnx.yolov8_quantize_with_accuracy_control.main import prepare_validation from examples.post_training_quantization.onnx.yolov8_quantize_with_accuracy_control.main import print_statistics @@ -64,20 +63,6 @@ def validate_ov_model( return stats, validator.seen, validator.metrics.nt_per_class.sum() -def run_benchmark(model_path: Path, config) -> float: - command = [ - "benchmark_app", - "-m", model_path.as_posix(), - "-d", "CPU", - "-api", "async", - "-t", "30", - "-shape", str([1, 3, config.imgsz, config.imgsz]), - ] # fmt: skip - cmd_output = subprocess.check_output(command, text=True) # nosec - match = re.search(r"Throughput\: (.+?) 
FPS", cmd_output) - return float(match.group(1)) - - args = get_cfg(cfg=DEFAULT_CFG) args.data = "coco128-seg.yaml" @@ -90,11 +75,11 @@ def run_benchmark(model_path: Path, config) -> float: ov.save_model(int8_ov_model, INT8_OV_MODEL_PATH, compress_to_fp16=False) print("[3/7] Benchmark FP32 OpenVINO model:", end=" ") -fp32_fps = run_benchmark(FP32_OV_MODEL_PATH, args) +fp32_fps = execute_benchmark_on_cpu(FP32_OV_MODEL_PATH, time=30, shape=[1, 3, args.imgsz, args.imgsz]) print(f"{fp32_fps} FPS") print("[4/7] Benchmark INT8 OpenVINO model:", end=" ") -int8_fps = run_benchmark(INT8_OV_MODEL_PATH, args) +int8_fps = execute_benchmark_on_cpu(INT8_OV_MODEL_PATH, time=30, shape=[1, 3, args.imgsz, args.imgsz]) print(f"{int8_fps} FPS") validator, data_loader = prepare_validation(YOLO(ROOT / f"{MODEL_NAME}.pt"), args) diff --git a/examples/post_training_quantization/openvino/anomaly_stfpm_quantize_with_accuracy_control/main.py b/examples/post_training_quantization/openvino/anomaly_stfpm_quantize_with_accuracy_control/main.py index ba48000fe3c..1db1a2cb5fd 100644 --- a/examples/post_training_quantization/openvino/anomaly_stfpm_quantize_with_accuracy_control/main.py +++ b/examples/post_training_quantization/openvino/anomaly_stfpm_quantize_with_accuracy_control/main.py @@ -10,8 +10,6 @@ # limitations under the License. 
import json -import re -import subprocess import sys from functools import partial from pathlib import Path @@ -26,6 +24,7 @@ from anomalib.utils.metrics import create_metric_collection import nncf +from examples import execute_benchmark_on_cpu ROOT = Path(__file__).parent.resolve() HOME_PATH = Path.home() @@ -82,21 +81,6 @@ def validate( return metric_value, per_sample_metric_values -def run_benchmark(model_path: Path, shape: list[int]) -> float: - command = [ - "benchmark_app", - "-m", model_path.as_posix(), - "-d", "CPU", - "-api", "async", - "-t", "15", - "-shape", str(shape), - ] # fmt: skip - cmd_output = subprocess.check_output(command, text=True) # nosec - print(*cmd_output.splitlines()[-8:], sep="\n") - match = re.search(r"Throughput\: (.+?) FPS", cmd_output) - return float(match.group(1)) - - def get_model_size(ir_path: Path, m_type: str = "Mb") -> float: xml_size = ir_path.stat().st_size bin_size = ir_path.with_suffix(".bin").stat().st_size @@ -182,9 +166,9 @@ def transform_fn(data_item): int8_size = get_model_size(int8_ir_path) print("[3/7] Benchmark FP32 model:") - fp32_fps = run_benchmark(fp32_ir_path, shape=[1, 3, 256, 256]) + fp32_fps = execute_benchmark_on_cpu(fp32_ir_path, time=15, shape=[1, 3, 256, 256]) print("[4/7] Benchmark INT8 model:") - int8_fps = run_benchmark(int8_ir_path, shape=[1, 3, 256, 256]) + int8_fps = execute_benchmark_on_cpu(int8_ir_path, time=15, shape=[1, 3, 256, 256]) print("[5/7] Validate OpenVINO FP32 model:") compiled_model = ov.compile_model(ov_model, device_name="CPU") diff --git a/examples/post_training_quantization/openvino/mobilenet_v2/main.py b/examples/post_training_quantization/openvino/mobilenet_v2/main.py index a53a0a97fb5..aa09f37f258 100644 --- a/examples/post_training_quantization/openvino/mobilenet_v2/main.py +++ b/examples/post_training_quantization/openvino/mobilenet_v2/main.py @@ -9,8 +9,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import re -import subprocess from pathlib import Path import numpy as np @@ -23,6 +21,7 @@ from torchvision import transforms import nncf +from examples import execute_benchmark_on_cpu ROOT = Path(__file__).parent.resolve() DATASET_PATH = Path().home() / ".cache" / "nncf" / "datasets" @@ -54,14 +53,6 @@ def validate(model: ov.Model, val_loader: torch.utils.data.DataLoader) -> float: return accuracy_score(predictions, references) -def run_benchmark(model_path: Path, shape: list[int]) -> float: - cmd = ["benchmark_app", "-m", model_path.as_posix(), "-d", "CPU", "-api", "async", "-t", "15", "-shape", str(shape)] - cmd_output = subprocess.check_output(cmd, text=True) # nosec - print(*cmd_output.splitlines()[-8:], sep="\n") - match = re.search(r"Throughput\: (.+?) FPS", cmd_output) - return float(match.group(1)) - - def get_model_size(ir_path: Path, m_type: str = "Mb") -> float: xml_size = ir_path.stat().st_size bin_size = ir_path.with_suffix(".bin").stat().st_size @@ -141,9 +132,10 @@ def transform_fn(data_item): int8_model_size = get_model_size(int8_ir_path) print("[3/7] Benchmark FP32 model:") -fp32_fps = run_benchmark(fp32_ir_path, shape=[1, 3, 224, 224]) +fp32_fps = execute_benchmark_on_cpu(fp32_ir_path, time=15, shape=[1, 3, 224, 224]) print("[4/7] Benchmark INT8 model:") -int8_fps = run_benchmark(int8_ir_path, shape=[1, 3, 224, 224]) +int8_fps = execute_benchmark_on_cpu(int8_ir_path, time=15, shape=[1, 3, 224, 224]) + print("[5/7] Validate OpenVINO FP32 model:") fp32_top1 = validate(ov_model, val_data_loader) diff --git a/examples/post_training_quantization/openvino/yolo26/main.py b/examples/post_training_quantization/openvino/yolo26/main.py index 75825744dad..01e8147175f 100644 --- a/examples/post_training_quantization/openvino/yolo26/main.py +++ b/examples/post_training_quantization/openvino/yolo26/main.py @@ -8,8 +8,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -import re -import subprocess from pathlib import Path from typing import Any @@ -26,6 +24,7 @@ from ultralytics.utils.metrics import ConfusionMatrix import nncf +from examples import execute_benchmark_on_cpu MODEL_NAME = "yolo26n" @@ -83,20 +82,6 @@ def prepare_validation(model: YOLO, args: Any) -> tuple[DetectionValidator, torc return validator, data_loader -def benchmark_performance(model_path: Path, config) -> float: - command = [ - "benchmark_app", - "-m", model_path.as_posix(), - "-d", "CPU", - "-api", "async", - "-t", "30", - "-shape", str([1, 3, config.imgsz, config.imgsz]), - ] # fmt: skip - cmd_output = subprocess.check_output(command, text=True) # nosec - match = re.search(r"Throughput\: (.+?) FPS", cmd_output) - return float(match.group(1)) - - def prepare_openvino_model(model: YOLO, model_name: str) -> tuple[ov.Model, Path]: ir_model_path = ROOT / f"{model_name}_openvino_model" / f"{model_name}.xml" if not ir_model_path.exists(): @@ -162,11 +147,11 @@ def main(): print_statistics(q_stats, total_images, total_objects) # Benchmark performance of FP32 model - fp_model_perf = benchmark_performance(ov_model_path, args) + fp_model_perf = execute_benchmark_on_cpu(ov_model_path, time=30, shape=[1, 3, args.imgsz, args.imgsz]) print(f"Floating-point model performance: {fp_model_perf} FPS") # Benchmark performance of quantized model - quantized_model_perf = benchmark_performance(quantized_model_path, args) + quantized_model_perf = execute_benchmark_on_cpu(quantized_model_path, time=30, shape=[1, 3, args.imgsz, args.imgsz]) print(f"Quantized model performance: {quantized_model_perf} FPS") return fp_stats["metrics/mAP50-95(B)"], q_stats["metrics/mAP50-95(B)"], fp_model_perf, quantized_model_perf diff --git a/examples/post_training_quantization/openvino/yolov8_quantize_with_accuracy_control/main.py 
b/examples/post_training_quantization/openvino/yolov8_quantize_with_accuracy_control/main.py index 88dba967af1..3c2dfb06091 100644 --- a/examples/post_training_quantization/openvino/yolov8_quantize_with_accuracy_control/main.py +++ b/examples/post_training_quantization/openvino/yolov8_quantize_with_accuracy_control/main.py @@ -8,8 +8,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import re -import subprocess from functools import partial from pathlib import Path from typing import Any @@ -28,6 +26,7 @@ from ultralytics.utils.metrics import ConfusionMatrix import nncf +from examples import execute_benchmark_on_cpu MODEL_NAME = "yolov8n-seg" @@ -109,20 +108,6 @@ def prepare_validation(model: YOLO, args: Any) -> tuple[SegmentationValidator, t return validator, data_loader -def benchmark_performance(model_path, config) -> float: - command = [ - "benchmark_app", - "-m", model_path.as_posix(), - "-d", "CPU", - "-api", "async", - "-t", "30", - "-shape", str([1, 3, config.imgsz, config.imgsz]), - ] # fmt: skip - cmd_output = subprocess.check_output(command, text=True) # nosec - match = re.search(r"Throughput\: (.+?) 
FPS", cmd_output) - return float(match.group(1)) - - def prepare_openvino_model(model: YOLO, model_name: str) -> tuple[ov.Model, Path]: ir_model_path = ROOT / f"{model_name}_openvino_model" / f"{model_name}.xml" if not ir_model_path.exists(): @@ -235,11 +220,11 @@ def main(): print_statistics(q_stats, total_images, total_objects) # Benchmark performance of FP32 model - fp_model_perf = benchmark_performance(ov_model_path, args) + fp_model_perf = execute_benchmark_on_cpu(ov_model_path, time=30, shape=[1, 3, args.imgsz, args.imgsz]) print(f"Floating-point model performance: {fp_model_perf} FPS") # Benchmark performance of quantized model - quantized_model_perf = benchmark_performance(quantized_model_path, args) + quantized_model_perf = execute_benchmark_on_cpu(quantized_model_path, time=30, shape=[1, 3, args.imgsz, args.imgsz]) print(f"Quantized model performance: {quantized_model_perf} FPS") return fp_stats["metrics/mAP50-95(B)"], q_stats["metrics/mAP50-95(B)"], fp_model_perf, quantized_model_perf diff --git a/examples/post_training_quantization/torch/mobilenet_v2/main.py b/examples/post_training_quantization/torch/mobilenet_v2/main.py index 42f59ab401d..360e079f19e 100644 --- a/examples/post_training_quantization/torch/mobilenet_v2/main.py +++ b/examples/post_training_quantization/torch/mobilenet_v2/main.py @@ -9,8 +9,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import re -import subprocess from functools import partial from pathlib import Path @@ -25,6 +23,7 @@ from torchvision import transforms import nncf +from examples import execute_benchmark_on_cpu ROOT = Path(__file__).parent.resolve() CHECKPOINT_URL = "https://huggingface.co/alexsu52/mobilenet_v2_imagenette/resolve/main/pytorch_model.bin" @@ -61,21 +60,6 @@ def validate(model: ov.Model, val_loader: torch.utils.data.DataLoader) -> float: return accuracy_score(predictions, references) -def run_benchmark(model_path: Path, shape: list[int]) -> float: - command = [ - "benchmark_app", - "-m", model_path.as_posix(), - "-d", "CPU", - "-api", "async", - "-t", "15", - "-shape", str(shape), - ] # fmt: skip - cmd_output = subprocess.check_output(command, text=True) # nosec - print(*cmd_output.splitlines()[-8:], sep="\n") - match = re.search(r"Throughput\: (.+?) FPS", cmd_output) - return float(match.group(1)) - - def get_model_size(ir_path: Path, m_type: str = "Mb") -> float: xml_size = ir_path.stat().st_size bin_size = ir_path.with_suffix(".bin").stat().st_size @@ -165,9 +149,10 @@ def transform_fn(data_item: tuple[torch.Tensor, int], device: torch.device) -> t int8_model_size = get_model_size(int8_ir_path) print("[3/7] Benchmark FP32 model:") -fp32_fps = run_benchmark(fp32_ir_path, shape=[1, 3, 224, 224]) +fp32_fps = execute_benchmark_on_cpu(fp32_ir_path, time=15, shape=[1, 3, 224, 224]) + print("[4/7] Benchmark INT8 model:") -int8_fps = run_benchmark(int8_ir_path, shape=[1, 3, 224, 224]) +int8_fps = execute_benchmark_on_cpu(int8_ir_path, time=15, shape=[1, 3, 224, 224]) print("[5/7] Validate OpenVINO FP32 model:") fp32_top1 = validate(ov_model, val_data_loader) diff --git a/examples/post_training_quantization/torch/ssd300_vgg16/main.py b/examples/post_training_quantization/torch/ssd300_vgg16/main.py index 2d35fe56043..8d95c484d26 100644 --- a/examples/post_training_quantization/torch/ssd300_vgg16/main.py +++ b/examples/post_training_quantization/torch/ssd300_vgg16/main.py 
@@ -9,8 +9,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # isort: off -import re -import subprocess from pathlib import Path from typing import Callable @@ -21,6 +19,7 @@ import openvino as ov import torch import torchvision +from examples import execute_benchmark_on_cpu from fastdownload import FastDownload from PIL import Image from torchmetrics.detection.mean_ap import MeanAveragePrecision @@ -54,14 +53,6 @@ def get_model_size(ir_path: Path, m_type: str = "Mb") -> float: return model_size -def run_benchmark(model_path: Path) -> float: - command = ["benchmark_app", "-m", model_path.as_posix(), "-d", "CPU", "-api", "async", "-t", "15"] - cmd_output = subprocess.check_output(command, text=True) # nosec - print(*cmd_output.splitlines()[-8:], sep="\n") - match = re.search(r"Throughput\: (.+?) FPS", cmd_output) - return float(match.group(1)) - - class COCO128Dataset(torch.utils.data.Dataset): category_mapping = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, @@ -169,9 +160,10 @@ def main(): int8_model_size = get_model_size(int8_ir_path) print("[3/7] Benchmark FP32 model:") - fp32_fps = run_benchmark(fp32_ir_path) + fp32_fps = execute_benchmark_on_cpu(fp32_ir_path, time=15) + print("[4/7] Benchmark INT8 model:") - int8_fps = run_benchmark(int8_ir_path) + int8_fps = execute_benchmark_on_cpu(int8_ir_path, time=15) print("[5/7] Validate FP32 model:") torch.backends.cudnn.deterministic = True diff --git a/examples/quantization_aware_training/torch/anomalib/main.py b/examples/quantization_aware_training/torch/anomalib/main.py index f8efb698108..f9d1df21a13 100644 --- a/examples/quantization_aware_training/torch/anomalib/main.py +++ b/examples/quantization_aware_training/torch/anomalib/main.py @@ -10,8 +10,6 @@ # limitations under the License. 
import os -import re -import subprocess import tarfile import warnings from copy import deepcopy @@ -27,6 +25,7 @@ from anomalib.models import Stfpm import nncf +from examples import execute_benchmark_on_cpu warnings.filterwarnings("ignore", category=torch.jit.TracerWarning) @@ -93,21 +92,6 @@ def create_dataset(root: Path) -> MVTecAD: return data -def run_benchmark(model_path: Path, shape: list[int]) -> float: - command = [ - "benchmark_app", - "-m", model_path.as_posix(), - "-d", "CPU", - "-api", "async", - "-t", "15", - "-shape", str(shape), - ] # fmt: skip - cmd_output = subprocess.check_output(command, text=True) # nosec - print(*cmd_output.splitlines()[-8:], sep="\n") - match = re.search(r"Throughput\: (.+?) FPS", cmd_output) - return float(match.group(1)) - - def get_model_size(ir_path: Path, m_type: str = "Mb") -> float: xml_size = ir_path.stat().st_size bin_size = ir_path.with_suffix(".bin").stat().st_size @@ -209,10 +193,10 @@ def transform_fn(data_item): print(os.linesep + "[Step 5] Run benchmarks") print("Run benchmark for FP32 model (IR)...") - fp32_fps = run_benchmark(fp32_ir_path, shape=[1, 3, 256, 256]) + fp32_fps = execute_benchmark_on_cpu(fp32_ir_path, time=15, shape=[1, 3, 256, 256]) print("Run benchmark for INT8 model (IR)...") - int8_fps = run_benchmark(int8_ir_path, shape=[1, 3, 256, 256]) + int8_fps = execute_benchmark_on_cpu(int8_ir_path, time=15, shape=[1, 3, 256, 256]) ############################################################################### # Step 6: Summary diff --git a/examples/quantization_aware_training/torch/resnet18/main.py b/examples/quantization_aware_training/torch/resnet18/main.py index e9a89461746..7b841e15f95 100644 --- a/examples/quantization_aware_training/torch/resnet18/main.py +++ b/examples/quantization_aware_training/torch/resnet18/main.py @@ -10,8 +10,6 @@ # limitations under the License. 
import os -import re -import subprocess import warnings from copy import deepcopy from pathlib import Path @@ -30,6 +28,7 @@ from torch.jit import TracerWarning import nncf +from examples import execute_benchmark_on_cpu from nncf.common.utils.helpers import create_table warnings.filterwarnings("ignore", category=TracerWarning) @@ -205,20 +204,6 @@ def prepare_tiny_imagenet_200(dataset_dir: Path): val_images_dir.rmdir() -def run_benchmark(model_path: Path, shape: tuple[int, ...]) -> float: - command = [ - "benchmark_app", - "-m", model_path.as_posix(), - "-d", "CPU", - "-api", "async", - "-t", "15", - "-shape", str(list(shape)), - ] # fmt: skip - cmd_output = subprocess.check_output(command, text=True) # nosec - match = re.search(r"Throughput\: (.+?) FPS", cmd_output) - return float(match.group(1)) - - def get_model_size(ir_path: Path, m_type: str = "Mb") -> float: xml_size = ir_path.stat().st_size bin_size = ir_path.with_suffix(".bin").stat().st_size @@ -323,10 +308,10 @@ def transform_fn(data_item): # Step 5: Run benchmarks print(os.linesep + "[Step 5] Run benchmarks") print("Run benchmark for FP32 model (IR)...") - fp32_fps = run_benchmark(fp32_ir_path, shape=input_shape) + fp32_fps = execute_benchmark_on_cpu(fp32_ir_path, time=15, shape=input_shape) print("Run benchmark for INT8 model (IR)...") - int8_fps = run_benchmark(int8_ir_path, shape=input_shape) + int8_fps = execute_benchmark_on_cpu(int8_ir_path, time=15, shape=input_shape) fp32_model_size = get_model_size(fp32_ir_path) int8_model_size = get_model_size(int8_ir_path) diff --git a/pyproject.toml b/pyproject.toml index 599eca11a90..fd197c673d4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -203,7 +203,6 @@ pythonpath = "." 
exclude_dirs = ["tools", "tests", "**/venv*", "build"] skips = [ "B101", # assert_used - "B404", # import_subprocess "B614", # pytorch_load "B615", # huggingface_unsafe_download ] diff --git a/src/custom_version.py b/src/custom_version.py index 905efd7f22d..6e603364792 100644 --- a/src/custom_version.py +++ b/src/custom_version.py @@ -48,7 +48,7 @@ import contextlib import os import re -import subprocess +import subprocess # nosec B404: only git commands are executed; user data is not passed to or modified by the executed commands from pathlib import Path NNCF_VERSION_FILE = "src/nncf/version.py" diff --git a/src/nncf/torch/quantization/extensions.py b/src/nncf/torch/quantization/extensions.py index cb13acc39e7..79d670245e1 100644 --- a/src/nncf/torch/quantization/extensions.py +++ b/src/nncf/torch/quantization/extensions.py @@ -9,7 +9,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import subprocess +from subprocess import ( + CalledProcessError, # nosec B404: imported only for CalledProcessError exception handling; no subprocess commands are executed in this module +) import torch @@ -92,7 +94,7 @@ def load(cls): ) except ExtensionLoaderTimeoutException as e: raise e - except (subprocess.CalledProcessError, OSError, RuntimeError) as e: + except (CalledProcessError, OSError, RuntimeError) as e: assert torch.cuda.is_available() msg = ( "CUDA is available for PyTorch, but NNCF could not compile "