Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,8 @@ std::vector<TRShape> shape_infer(const SpaceToBatch* op,
for (auto idx = spatial_dim_offset; idx < data_rank_size; ++idx) {
NODE_VALIDATION_CHECK(op, (*blocks)[idx] > 0, "block_shape values must be greater than 0");

const auto padded_dim = data_shape[idx] + static_cast<TVal>((*pads_begin)[idx] + (*pads_end)[idx]);
const auto padded_dim =
data_shape[idx] + static_cast<TVal>((*pads_begin)[idx]) + static_cast<TVal>((*pads_end)[idx]);
Comment on lines +73 to +74
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is there a reason why the validation has not been extended in the shape_infer function as well?

The shape_infer is called during the op constructor and in SpaceToBatch::evaluate; in the case of Const pads and static/bounded dimensions, it could prevent creation of such an op even before execution. The checks would also benefit plugins, where the shape_infer function is shared and called for the dynamic case.

Related tests to cover such case are here:

TEST(type_prop, space_to_batch_when_space_is_static) {
auto data_shape = PartialShape{{2, 5}, 100, 1024, 3};
set_shape_symbols(data_shape);
auto data = make_shared<ov::op::v0::Parameter>(element::f32, data_shape);
auto block_shape = make_shared<ov::op::v0::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 12, 100, 2});
auto pads_begin = make_shared<ov::op::v0::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 38, 1});
auto pads_end = make_shared<ov::op::v0::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 5, 38, 0});
auto space_to_batch = make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
EXPECT_EQ(
space_to_batch->get_output_partial_shape(0),
(PartialShape{{2 * 12 * 100 * 2, 5 * 12 * 100 * 2}, (100 + 3 + 5) / 12, (1024 + 38 + 38) / 100, (3 + 1) / 2}));

// Static shape inference for SpaceToBatch when block_shape and both pad inputs
// are Constants: the padded spatial dims must divide evenly by the block values.
TEST_F(SpaceToBatchV1StaticShapeInferenceTest, blocks_pads_as_constants) {
    const auto param = std::make_shared<Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
    const auto blocks = std::make_shared<Constant>(element::i64, ov::Shape{4}, std::vector<int64_t>{1, 12, 100, 2});
    const auto begin_pads = std::make_shared<Constant>(element::i64, ov::Shape{4}, std::vector<int64_t>{0, 3, 38, 1});
    const auto end_pads = std::make_shared<Constant>(element::i64, ov::Shape{4}, std::vector<int64_t>{0, 5, 38, 0});

    const auto op = make_op(param, blocks, begin_pads, end_pads);

    input_shapes = {{2, 100, 1024, 3}, {4}, {4}, {4}};
    output_shapes = shape_inference(op.get(), input_shapes);

    // batch = 2 * prod(block_shape); each spatial dim = (dim + pads_begin + pads_end) / block
    EXPECT_EQ(output_shapes[0],
              (StaticShape{2 * 12 * 100 * 2, (100 + 3 + 5) / 12, (1024 + 38 + 38) / 100, (3 + 1) / 2}));
}

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The shape infer logic uses Dimension operator+ which handles potential overflows

const auto divisor = static_cast<TVal>((*blocks)[idx]);

if (static_cast<int64_t>(padded_dim.get_max_length()) == dim::inf_bound) {
Expand Down
29 changes: 25 additions & 4 deletions src/core/src/op/space_to_batch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,12 @@

#include <cmath>
#include <cstddef>
#include <limits>
#include <memory>
#include <numeric>

#include "itt.hpp"
#include "openvino/core/memory_util.hpp"
#include "openvino/core/shape.hpp"
#include "openvino/op/util/attr_types.hpp"
#include "openvino/op/util/precision_sensitive_attribute.hpp"
Expand Down Expand Up @@ -100,10 +102,29 @@ bool evaluate(TensorVector& outputs, const TensorVector& inputs) {

Shape padded_shape(data_shape.size());
for (size_t i = 0; i < data_shape.size(); ++i) {
padded_shape[i] = data_shape[i] + pads_begin_vec[i] + pads_end_vec[i];
OPENVINO_ASSERT(pads_begin_vec[i] >= 0,
"SpaceToBatch: pads_begin[",
i,
"] must be non-negative, got ",
pads_begin_vec[i]);
OPENVINO_ASSERT(pads_end_vec[i] >= 0,
"SpaceToBatch: pads_end[",
i,
"] must be non-negative, got ",
pads_end_vec[i]);
const auto pb = static_cast<size_t>(pads_begin_vec[i]);
const auto pe = static_cast<size_t>(pads_end_vec[i]);
OPENVINO_ASSERT(data_shape[i] <= std::numeric_limits<size_t>::max() - pb &&
data_shape[i] + pb <= std::numeric_limits<size_t>::max() - pe,
"SpaceToBatch: padded dimension ",
i,
" overflows");
padded_shape[i] = data_shape[i] + pb + pe;
}

std::vector<char> padded_data(shape_size(padded_shape) * elem_size);
const auto padded_byte_size = ov::util::get_memory_size_safe(data.get_element_type(), padded_shape);
OPENVINO_ASSERT(padded_byte_size.has_value(), "SpaceToBatch: padded shape size overflows");
std::vector<char> padded_data(*padded_byte_size);
reference::pad(static_cast<const char*>(data.data()),
pad_value,
padded_data.data(),
Expand All @@ -122,8 +143,8 @@ bool evaluate(TensorVector& outputs, const TensorVector& inputs) {
std::iota(plain_axes_order.begin(), plain_axes_order.end(), 0);

std::vector<char> flat_data(padded_data.begin(), padded_data.end());
std::vector<char> dispersed_data(shape_size(data_shape) * elem_size);
std::vector<char> post_transpose_data(shape_size(data_shape) * elem_size);
std::vector<char> dispersed_data(*padded_byte_size);
std::vector<char> post_transpose_data(*padded_byte_size);

for (int64_t block_idx = block_values_size - 1; block_idx >= 0; --block_idx) {
int64_t sq_shape_idx = block_values_size - 1;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,10 @@

#include <gtest/gtest.h>

#include <limits>

#include "base_reference_test.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/core/except.hpp"
#include "openvino/op/parameter.hpp"

using namespace reference_tests;
Expand Down Expand Up @@ -158,4 +160,37 @@ INSTANTIATE_TEST_SUITE_P(smoke_SpaceToBatch_With_Hardcoded_Refs,
ReferenceSpaceToBatchLayerTest,
testing::ValuesIn(generateCombinedParams()),
ReferenceSpaceToBatchLayerTest::getTestCaseName);

// Fixture reusing the positive-path reference-test machinery; negative cases expect Exec() to throw.
class ReferenceSpaceToBatchLayerNegativeTest : public ReferenceSpaceToBatchLayerTest {};

// Each parameter set from generateNegativeParams() must be rejected with an
// ov::Exception during execution rather than producing garbage output.
TEST_P(ReferenceSpaceToBatchLayerNegativeTest, InvalidPaddingThrows) {
EXPECT_THROW(Exec(), ov::Exception);
}

std::vector<SpaceToBatchParams> generateNegativeParams() {
constexpr auto I64_MAX = std::numeric_limits<int64_t>::max();
const reference_tests::Tensor dummyExpected({1}, element::f32, std::vector<float>{0});

return {
// INT64_MAX padding causes size_t overflow in padded_shape computation
SpaceToBatchParams(reference_tests::Tensor({1, 4, 4}, element::f32, std::vector<float>(16, 1.0f)),
reference_tests::Tensor({3}, element::i64, std::vector<int64_t>{1, 1, 1}),
reference_tests::Tensor({3}, element::i64, std::vector<int64_t>{0, I64_MAX, I64_MAX}),
reference_tests::Tensor({3}, element::i64, std::vector<int64_t>{0, I64_MAX, I64_MAX}),
dummyExpected,
"padding_overflow"),
// Negative padding values
SpaceToBatchParams(reference_tests::Tensor({1, 1, 2, 2}, element::f32, std::vector<float>{1, 1, 1, 1}),
reference_tests::Tensor({4}, element::i64, std::vector<int64_t>{1, 1, 1, 1}),
reference_tests::Tensor({4}, element::i64, std::vector<int64_t>{0, 0, -1, 0}),
reference_tests::Tensor({4}, element::i64, std::vector<int64_t>{0, 0, 0, 0}),
dummyExpected,
"negative_padding"),
};
}

// Registers every negative parameter set with the throwing-path fixture.
INSTANTIATE_TEST_SUITE_P(smoke_SpaceToBatch_Negative,
ReferenceSpaceToBatchLayerNegativeTest,
testing::ValuesIn(generateNegativeParams()),
ReferenceSpaceToBatchLayerNegativeTest::getTestCaseName);
} // namespace
Loading