OpenVINO Version
2026.1.0
Operating System
Other (Please specify in description)
Device used for inference
CPU
Framework
ONNX
Model used
Minimal ONNX model generated by the reproducer script.
Issue description
OpenVINO CPU accepts an ONNX MaxPool model whose explicit padding is greater than or equal to the kernel size (kernel_shape=[3, 3] with pads=[3, 3, 3, 3]).
For the same model, ONNX Runtime (with graph optimizations both disabled and enabled) and onnx2torch reject the configuration with a padding/kernel validation error, while OpenVINO compiles and runs it, returning the enlarged 1x1x12x12 output declared by the graph.
OS: Ubuntu 24.04.2 LTS / Linux x86_64
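The 12x12 result OpenVINO returns is what the standard pooling shape formula yields when the oversized pads are applied blindly: floor((8 + 3 + 3 - 3) / 1) + 1 = 12 per spatial axis. The reason pads >= kernel_shape is normally rejected is that the outermost pooling windows then lie entirely inside the padded border and contain no input element. A minimal sketch of the kind of per-axis check that ONNX Runtime and onnx2torch apply (the function name and message are illustrative, not either library's actual code):

# Illustrative only: approximates the pad-vs-kernel validation other runtimes perform.
def check_pool_pads(kernel_shape, pads):
    # ONNX pads layout: [x1_begin, x2_begin, ..., x1_end, x2_end, ...]
    n = len(kernel_shape)
    for axis, k in enumerate(kernel_shape):
        if pads[axis] >= k or pads[axis + n] >= k:
            raise ValueError(
                f"pad must be smaller than kernel along axis {axis}: "
                f"kernel={k}, pads=({pads[axis]}, {pads[axis + n]})"
            )

check_pool_pads([3, 3], [3, 3, 3, 3])  # raises; OpenVINO accepts the equivalent model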
Step-by-step reproduction
Run the following self-contained script (e.g. python3 repro.py). It exits with status 0 and prints BUG REPRODUCED when OpenVINO accepts the model.

#!/usr/bin/env python3
"""Reproducer: OpenVINO CPU accepts an ONNX MaxPool whose explicit pads equal the kernel size."""
import sys

import numpy as np
import onnx
import onnxruntime as ort
import openvino as ov
from onnx import helper as oh, TensorProto as TP

# Deterministic 1x1x8x8 input.
x = np.random.RandomState(88).randn(1, 1, 8, 8).astype(np.float32)

# MaxPool with explicit padding equal to the kernel size on every side
# (pads >= kernel_shape), the configuration other runtimes reject.
node = oh.make_node(
    "MaxPool",
    ["x"],
    ["y"],
    kernel_shape=[3, 3],
    pads=[3, 3, 3, 3],
    strides=[1, 1],
    auto_pad="NOTSET",
)

# Declared output shape follows the pooling formula:
# floor((8 + 3 + 3 - 3) / 1) + 1 = 12 per spatial axis.
graph = oh.make_graph(
    [node],
    "maxpool_bad_pad",
    [oh.make_tensor_value_info("x", TP.FLOAT, [1, 1, 8, 8])],
    [oh.make_tensor_value_info("y", TP.FLOAT, [1, 1, 12, 12])],
)
model = oh.make_model(graph, opset_imports=[oh.make_opsetid("", 13)])
model.ir_version = 8
onnx.checker.check_model(model)  # the ONNX checker itself accepts this model
mb = model.SerializeToString()

feed = {"x": x}
results = {}

# ONNX Runtime with graph optimizations disabled (reference behavior).
try:
    so = ort.SessionOptions()
    so.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
    out = ort.InferenceSession(
        mb,
        sess_options=so,
        providers=["CPUExecutionProvider"],
    ).run(None, feed)[0]
    results["ORT_ref"] = ("ok", out.shape)
except Exception as e:
    # Keep only the first line of the error, truncated to 80 characters.
    results["ORT_ref"] = ("ERR", str(e).splitlines()[0][:80])

# ONNX Runtime with all graph optimizations enabled.
try:
    so = ort.SessionOptions()
    so.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
    out = ort.InferenceSession(
        mb,
        sess_options=so,
        providers=["CPUExecutionProvider"],
    ).run(None, feed)[0]
    results["ORT_opt"] = ("ok", out.shape)
except Exception as e:
    results["ORT_opt"] = ("ERR", str(e).splitlines()[0][:80])

# OpenVINO CPU plugin: compiles and runs the model (the reported bug).
try:
    core = ov.Core()
    compiled = core.compile_model(core.read_model(mb, b""), "CPU")
    out = compiled(feed)[compiled.output(0)]
    results["OpenVINO"] = ("ok", out.shape)
except Exception as e:
    results["OpenVINO"] = ("ERR", str(e).splitlines()[0][:80])

# onnx2torch conversion as a second independent reference.
try:
    import onnx2torch
    import torch

    net = onnx2torch.convert(onnx.load_from_string(mb)).eval()
    with torch.no_grad():
        out = net(torch.from_numpy(feed["x"]))
    results["onnx2torch"] = ("ok", tuple(out.shape))
except Exception as e:
    results["onnx2torch"] = ("ERR", str(e).splitlines()[0][:80])

for name, (status, detail) in results.items():
    print(f"{name:<12} {status:<4} {detail}")

if results.get("OpenVINO", ("ERR",))[0] == "ok":
    print("BUG REPRODUCED: OpenVINO accepts MaxPool with pad >= kernel.")
    sys.exit(0)
print("NOT REPRODUCED")
sys.exit(1)
Relevant log output
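Representative output of the reproducer (the layout follows its print statements; exact ERR detail strings vary with the installed onnxruntime/onnx2torch versions and are elided here):

ORT_ref      ERR  <padding/kernel validation error>
ORT_opt      ERR  <padding/kernel validation error>
OpenVINO     ok   (1, 1, 12, 12)
onnx2torch   ERR  <padding/kernel validation error>
BUG REPRODUCED: OpenVINO accepts MaxPool with pad >= kernel.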
Issue submission checklist