[WIP] Use absl inline vector for Shape, Strides #28756

Open · wants to merge 5 commits into base: master · showing changes from 4 commits
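Context for the hunks below: the PR swaps the std::vector<size_t> storage behind ov::Shape and ov::Strides for an Abseil inlined vector, and the diff refers to the new container as ov::inplace_vector. The alias definition itself is not part of the visible hunks, so the following is only a sketch of what it is assumed to look like; the header path and the inline capacity of 8 are guesses.

// Sketch only: the real alias lives elsewhere in the PR and may differ.
#include <absl/container/inlined_vector.h>
#include <cstddef>

namespace ov {
// Dimensions stay inline (no heap allocation) up to the chosen capacity
// and spill to the heap only for higher-rank shapes.
template <typename T, std::size_t N = 8>
using inplace_vector = absl::InlinedVector<T, N>;
}  // namespace ov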
1 change: 1 addition & 0 deletions docs/snippets/CMakeLists.txt
@@ -87,6 +87,7 @@ if(OpenCV_FOUND)
target_link_libraries(${TARGET_NAME} PRIVATE opencv_core)
endif()

target_link_libraries(${TARGET_NAME} PRIVATE absl::inlined_vector)
# ov_ncc_naming_style(FOR_TARGET "${TARGET_NAME}"
# SOURCE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}"
# ADDITIONAL_INCLUDE_DIRECTORIES
8 changes: 4 additions & 4 deletions samples/cpp/benchmark_app/remote_tensors_filling.cpp
@@ -101,8 +101,8 @@ std::map<std::string, ov::TensorVector> get_remote_input_tensors(

// Creating and filling shared buffers
cl_int err;
auto elementsNum = std::accumulate(begin(input.second.dataShape),
end(input.second.dataShape),
auto elementsNum = std::accumulate(std::begin(input.second.dataShape),
std::end(input.second.dataShape),
1,
std::multiplies<size_t>());
auto inputSize = elementsNum * input.second.type.bitwidth() / 8;
@@ -223,8 +223,8 @@ std::map<std::string, ov::TensorVector> get_remote_input_tensors(
<< std::string((input.second.is_image() ? "image" : "some binary data"))
<< " is expected)" << slog::endl;

auto elementsNum = std::accumulate(begin(input.second.dataShape),
end(input.second.dataShape),
auto elementsNum = std::accumulate(std::begin(input.second.dataShape),
std::end(input.second.dataShape),
1,
std::multiplies<size_t>());

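The only change in this file is qualifying begin/end as std::begin/std::end. Unqualified begin(x) resolves through argument-dependent lookup, which searches the namespace of x's type; that works while dataShape is a std::vector, but once it is backed by a container outside namespace std (absl/ov), the unqualified calls stop resolving. A self-contained sketch of the same element-count computation, with a hypothetical shape type:

// Sketch: the shape type name and inline capacity are illustrative.
#include <absl/container/inlined_vector.h>
#include <cstddef>
#include <functional>
#include <iterator>
#include <numeric>

using ShapeSketch = absl::InlinedVector<std::size_t, 8>;

std::size_t element_count(const ShapeSketch& shape) {
    // std::begin/std::end work through the member begin()/end(); an
    // unqualified begin(shape) would rely on ADL, which searches namespace
    // absl rather than std, so it would no longer resolve here.
    return std::accumulate(std::begin(shape),
                           std::end(shape),
                           std::size_t{1},
                           std::multiplies<std::size_t>());
}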
1 change: 1 addition & 0 deletions src/CMakeLists.txt
@@ -3,6 +3,7 @@
#

add_definitions(-DIN_OV_COMPONENT)
add_subdirectory(absl)

if(CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG)
ov_add_compiler_flags(-Wmissing-declarations)
16 changes: 16 additions & 0 deletions src/absl/CMakeLists.txt
@@ -0,0 +1,16 @@
# Copyright (C) 2018-2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

include(FetchContent)

set(ABSL_ENABLE_INSTALL ON)

set(BUILD_SHARED_LIBS OFF)
FetchContent_Declare(
abseil-cpp
EXCLUDE_FROM_ALL
GIT_REPOSITORY https://github.com/abseil/abseil-cpp.git
GIT_TAG d7aaad83b488fd62bd51c81ecf16cd938532cc0a # LTS_2024_01_16
)
FetchContent_MakeAvailable(abseil-cpp)
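The point of pulling Abseil in is absl::InlinedVector's small-buffer optimization: elements up to the compile-time capacity live inside the object itself, so typical low-rank shapes never touch the heap. A standalone illustration (the capacity of 4 is chosen for the example, not taken from the PR):

// Sketch: inline storage vs. heap spill with absl::InlinedVector.
#include <absl/container/inlined_vector.h>
#include <cstddef>
#include <cstdio>

int main() {
    // Four elements fit in the inline buffer: no heap allocation happens here.
    absl::InlinedVector<std::size_t, 4> dims = {1, 3, 224, 224};
    // A fifth element exceeds the inline capacity, so storage moves to the heap.
    dims.push_back(2);
    std::printf("rank=%zu capacity=%zu\n", dims.size(), dims.capacity());
    return 0;
}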
6 changes: 3 additions & 3 deletions src/bindings/python/src/pyopenvino/core/common.cpp
@@ -388,15 +388,15 @@ py::array array_from_constant_view(ov::op::v0::Constant&& c) {

namespace constant_helpers {

std::vector<size_t> _get_byte_strides(const ov::Shape& s, const size_t element_byte_size) {
auto byte_strides = ov::row_major_strides(s);
ov::Strides _get_byte_strides(const ov::Shape& s, const size_t element_byte_size) {
ov::Strides byte_strides = ov::row_major_strides(s);
for (auto&& stride : byte_strides) {
stride *= element_byte_size;
}
return byte_strides;
}

std::vector<size_t> _get_strides(const ov::op::v0::Constant& self) {
ov::Strides _get_strides(const ov::op::v0::Constant& self) {
using namespace ov::element;
switch (self.get_element_type()) {
case i4:
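For reference, _get_byte_strides only scales the row-major element strides by the element size: a shape {2, 3, 4} has row-major strides {12, 4, 1} in elements, which become {48, 16, 4} bytes for a 4-byte type. A minimal sketch of that computation, using std::vector and illustrative names for brevity:

// Sketch: byte strides from a row-major shape, mirroring _get_byte_strides.
#include <cstddef>
#include <vector>

std::vector<std::size_t> byte_strides(const std::vector<std::size_t>& shape,
                                      std::size_t element_byte_size) {
    std::vector<std::size_t> strides(shape.size(), element_byte_size);
    // Walk from the innermost dimension outwards: each stride is the product
    // of all faster-varying dimensions times the element size.
    for (std::size_t i = shape.size(); i-- > 1;) {
        strides[i - 1] = strides[i] * shape[i];
    }
    return strides;
}
// byte_strides({2, 3, 4}, 4) == {48, 16, 4}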
6 changes: 3 additions & 3 deletions src/bindings/python/src/pyopenvino/core/common.hpp
@@ -113,14 +113,14 @@ py::array array_from_constant_view(ov::op::v0::Constant&& c);
}; // namespace array_helpers

namespace constant_helpers {
std::vector<size_t> _get_byte_strides(const ov::Shape& s, size_t element_byte_size);
ov::Strides _get_byte_strides(const ov::Shape& s, size_t element_byte_size);

template <typename T>
std::vector<size_t> _get_byte_strides(const ov::Shape& s) {
ov::Strides _get_byte_strides(const ov::Shape& s) {
return _get_byte_strides(s, sizeof(T));
}

std::vector<size_t> _get_strides(const ov::op::v0::Constant& self);
ov::Strides _get_strides(const ov::op::v0::Constant& self);

}; // namespace constant_helpers

2 changes: 1 addition & 1 deletion src/bindings/python/src/pyopenvino/core/tensor.cpp
@@ -209,7 +209,7 @@ void regclass_Tensor(py::module m) {

cls.def(py::init<ov::Tensor, ov::Coordinate, ov::Coordinate>(), py::arg("other"), py::arg("begin"), py::arg("end"));

cls.def(py::init<ov::Tensor, std::vector<size_t>, std::vector<size_t>>(),
cls.def(py::init<ov::Tensor, ov::inplace_vector<size_t>, ov::inplace_vector<size_t>>(),
py::arg("other"),
py::arg("begin"),
py::arg("end"));
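Changing the bound constructor signature to ov::inplace_vector<size_t> means pybind11 must know how to convert Python sequences into that container. pybind11's stl.h provides list_caster for exactly this kind of sequence-like type; whether the PR relies on it, on an implicit conversion from std::vector, or on something else is not visible here, so the following caster is only a hedged sketch:

// Sketch only: teach pybind11 to convert Python sequences to the inlined vector.
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <absl/container/inlined_vector.h>
#include <cstddef>

namespace pybind11::detail {
// Reuses pybind11's generic sequence caster, the same mechanism it applies
// to std::vector, for absl::InlinedVector-backed arguments.
template <typename T, std::size_t N>
struct type_caster<absl::InlinedVector<T, N>>
    : list_caster<absl::InlinedVector<T, N>, T> {};
}  // namespace pybind11::detail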
@@ -12,6 +12,6 @@

namespace py = pybind11;

std::vector<size_t> _get_strides(const ov::op::v0::Constant& self);
ov::Strides _get_strides(const ov::op::v0::Constant& self);

void regclass_graph_op_Constant(py::module m);
2 changes: 1 addition & 1 deletion src/bindings/python/src/pyopenvino/graph/strides.cpp
@@ -28,7 +28,7 @@ void regclass_graph_Strides(py::module m) {
py::class_<ov::Strides, std::shared_ptr<ov::Strides>> strides(m, "Strides");
strides.doc() = "openvino.Strides wraps ov::Strides";
strides.def(py::init<const std::initializer_list<size_t>&>(), py::arg("axis_strides"));
strides.def(py::init<const std::vector<size_t>&>(), py::arg("axis_strides"));
strides.def(py::init<const ov::inplace_vector<size_t>&>(), py::arg("axis_strides"));
strides.def(py::init<const ov::Strides&>(), py::arg("axis_strides"));

strides.def("__str__", [](const ov::Strides& self) -> std::string {
5 changes: 4 additions & 1 deletion src/cmake/openvino.cmake
@@ -49,7 +49,10 @@ target_include_directories(${TARGET_NAME} INTERFACE
$<BUILD_INTERFACE:${OpenVINO_SOURCE_DIR}/src/frontends/tensorflow/include>
$<BUILD_INTERFACE:${OpenVINO_SOURCE_DIR}/src/frontends/tensorflow_lite/include>)

target_link_libraries(${TARGET_NAME} PRIVATE openvino::reference
target_link_libraries(${TARGET_NAME} PUBLIC absl::base
absl::throw_delegate
absl::inlined_vector
PRIVATE openvino::reference
openvino::shape_inference
openvino::pugixml
${CMAKE_DL_LIBS}
@@ -85,10 +85,9 @@ class LP_TRANSFORMATIONS_API NetworkHelper {

static std::shared_ptr<Node> getConstantInput(const std::shared_ptr<const Node>& node, const bool convertIsExpected = false);

static std::vector<size_t> updateReshapeValues(
const Shape& elementwiseConstantShape,
const Shape& elementwiseShape,
const std::vector<size_t>& reshapeValues);
static ov::inplace_vector<size_t> updateReshapeValues(const Shape& elementwiseConstantShape,
const Shape& elementwiseShape,
const ov::inplace_vector<size_t>& reshapeValues);

// Optimizes the series of multiplies after a given output port
static std::shared_ptr<ov::opset1::Multiply> optimizeMultipliesAfter(std::shared_ptr<Node> multiply);
@@ -411,11 +411,11 @@ std::shared_ptr<Node> NetworkHelper::getConstantInput(const std::shared_ptr<cons
return nullptr;
}

std::vector<size_t> NetworkHelper::updateReshapeValues(
ov::inplace_vector<size_t> NetworkHelper::updateReshapeValues(
const Shape& elementwiseConstantShape,
const Shape& elementwiseShape,
const std::vector<size_t>& reshapeValues) {
Shape updatedReshapeValues = reshapeValues;
const ov::inplace_vector<size_t>& reshapeValues) {
auto updatedReshapeValues = reshapeValues;
for (size_t elementwiseIndex = 0, reshapeIndex = 0; elementwiseIndex < elementwiseConstantShape.size(); ++elementwiseIndex) {
if (elementwiseConstantShape[elementwiseIndex] != elementwiseShape[elementwiseIndex]) {
size_t reducedValue = 1ul;
@@ -438,7 +438,7 @@ std::vector<size_t> NetworkHelper::updateReshapeValues(
}
}
}
return std::move(updatedReshapeValues);
return updatedReshapeValues;
}
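Replacing return std::move(updatedReshapeValues) with a plain return is a genuine improvement: returning a local by value is already treated as an rvalue, while the explicit std::move blocks NRVO and draws -Wpessimizing-move warnings. A minimal illustration:

// Sketch: why "return local;" beats "return std::move(local);".
#include <utility>
#include <vector>

std::vector<int> make_good() {
    std::vector<int> v{1, 2, 3};
    return v;             // eligible for copy elision (NRVO); a move at worst
}

std::vector<int> make_pessimized() {
    std::vector<int> v{1, 2, 3};
    return std::move(v);  // blocks NRVO, forces a move; compilers warn about it
}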

std::shared_ptr<ov::opset1::Multiply> NetworkHelper::optimizeMultipliesAfter(std::shared_ptr<Node> node) {
@@ -1897,4 +1897,4 @@ bool NetworkHelper::checkConstantNotInf(const std::shared_ptr<Node> constant_nod
}
} // namespace low_precision
} // namespace pass
} // namespace ov
} // namespace ov
@@ -967,7 +967,7 @@ class ov::pass::mask_propagation::Reduce : public MatcherPass {
}
};

using dims_vec = std::vector<size_t>;
using dims_vec = ov::inplace_vector<size_t>;
static std::vector<dims_vec> map_reshaped_dimensions(const dims_vec input_shape, const dims_vec output_shape) {
auto dims_map = std::vector<dims_vec>();
auto cur_output_dims = dims_vec();
@@ -1107,7 +1107,7 @@ static ChannelsMap map_channels(const std::set<uint64_t> squized_mask_dim,
* dims_map map vector and unsquized_shape shape.
*/
static std::vector<DimsAttr> collect_dims_attrs(const std::vector<dims_vec> dims_map,
const std::vector<size_t> unsquized_shape) {
const ov::inplace_vector<size_t> unsquized_shape) {
auto dims_attrs = std::vector<DimsAttr>();
for (size_t squized_dim = 0; squized_dim < dims_map.size(); ++squized_dim) {
auto unsquized_dims = dims_map[squized_dim];
@@ -48,7 +48,7 @@ class MHAParallelWAOptimizer : public lowered::pass::RuntimeOptimizer {

std::vector<lowered::ExpandedLoopInfoPtr> m_loops_to_split{};
std::unordered_set<size_t> m_unsqueezed_params{};
std::vector<std::vector<size_t>> m_optimized_layouts{};
std::vector<VectorDims> m_optimized_layouts{};
std::vector<size_t> m_dim_M_idces{};
size_t m_concurrency = 0;

@@ -58,4 +58,4 @@ class MHAParallelWAOptimizer : public lowered::pass::RuntimeOptimizer {
} // namespace pass
} // namespace lowered
} // namespace snippets
} // namespace ov
} // namespace ov
30 changes: 15 additions & 15 deletions src/common/snippets/include/snippets/lowered/port_descriptor.hpp
@@ -20,29 +20,29 @@ using PortDescriptorPtr = std::shared_ptr<PortDescriptor>;
class PortDescriptor {
friend class LinearIRBuilder;
public:
explicit PortDescriptor(const ov::Input<ov::Node>& node,
VectorDims subtensor_shape = {},
std::vector<size_t> layout = {});
explicit PortDescriptor(const ov::Input<ov::Node>& node, VectorDims subtensor_shape = {}, VectorDims layout = {});
explicit PortDescriptor(const ov::Input<const ov::Node>& node,
VectorDims subtensor_shape = {},
std::vector<size_t> layout = {});
explicit PortDescriptor(const ov::Output<ov::Node>& node,
VectorDims subtensor_shape = {},
std::vector<size_t> layout = {});
VectorDims layout = {});
explicit PortDescriptor(const ov::Output<ov::Node>& node, VectorDims subtensor_shape = {}, VectorDims layout = {});
explicit PortDescriptor(const ov::Output<const ov::Node>& node,
VectorDims subtensor_shape = {},
std::vector<size_t> layout = {});
PortDescriptor(VectorDims shape, VectorDims subtensor_shape, std::vector<size_t> layout = {}, Reg reg = {});
PortDescriptor(VectorDimsPtr shape, VectorDims subtensor_shape, std::vector<size_t> layout = {}, Reg reg = {});
VectorDims layout = {});
PortDescriptor(VectorDims shape, VectorDims subtensor_shape, VectorDims layout = {}, Reg reg = {});
PortDescriptor(VectorDimsPtr shape, VectorDims subtensor_shape, VectorDims layout = {}, Reg reg = {});
PortDescriptor();

const VectorDims& get_shape() const;
const VectorDims& get_subtensor() const {return m_subtensor_shape;}
const std::vector<size_t>& get_layout() const {return m_layout;}
const VectorDims& get_layout() const {
return m_layout;
}
const Reg& get_reg() const { return m_reg; }

void set_shape(const VectorDims& tensor);
void set_layout(const std::vector<size_t>& layout) { m_layout = layout; }
void set_layout(const VectorDims& layout) {
m_layout = layout;
}
void set_subtensor(const VectorDims& subtensor) { m_subtensor_shape = subtensor; }
void set_reg(Reg reg) { m_reg = std::move(reg); }
void set_reg_type(RegType type) { m_reg.type = type; }
Expand All @@ -63,7 +63,7 @@ class PortDescriptor {
/// \brief Original tensor shape
VectorDimsPtr m_tensor_shape = nullptr;
/// \brief Order of dimensions: NCHW == {0, 1, 2, 3}, NHWC == {0, 2, 3, 1}, NCHW16c == {0, 1, 2, 3, 1}
std::vector<size_t> m_layout{};
VectorDims m_layout{};
/// \brief Minimal tensor size that could be processed in one call
VectorDims m_subtensor_shape{};
/// \brief The corresponding abstract/physical register
Expand All @@ -84,8 +84,8 @@ class PortDescriptorUtils {
public:
static void set_port_descriptor_ptr(const ov::Input<ov::Node>& in, const PortDescriptorPtr& desc);
static void set_port_descriptor_ptr(const ov::Output<ov::Node>& out, const PortDescriptorPtr& desc);
static void set_port_descriptor(const ov::Input<ov::Node>& in, std::vector<size_t> subtensor, std::vector<size_t> layout = {});
static void set_port_descriptor(const ov::Output<ov::Node>& out, std::vector<size_t> subtensor, std::vector<size_t> layout = {});
static void set_port_descriptor(const ov::Input<ov::Node>& in, VectorDims subtensor, VectorDims layout = {});
static void set_port_descriptor(const ov::Output<ov::Node>& out, VectorDims subtensor, VectorDims layout = {});

static PortDescriptorPtr get_port_descriptor_ptr(const ov::Input<ov::Node>& in);
static PortDescriptorPtr get_port_descriptor_ptr(const ov::Input<const ov::Node>& in);
26 changes: 18 additions & 8 deletions src/common/snippets/include/snippets/op/brgemm.hpp
@@ -20,12 +20,22 @@ namespace op {
class Brgemm : virtual public modifier::MemoryAccess, public ov::op::Op {
public:
OPENVINO_OP("Brgemm", "SnippetsOpset");
Brgemm(const Output<Node>& A, const Output<Node>& B,
const size_t offset_a = 0lu, const size_t offset_b = 0lu, const size_t offset_c = 0lu,
std::vector<size_t> layout_a = {}, std::vector<size_t> layout_b = {}, std::vector<size_t> layout_c = {});
Brgemm(const Output<Node>& A, const Output<Node>& B,
const PortDescriptor& desc_a, const PortDescriptor& desc_b, const PortDescriptor& desc_c,
std::vector<size_t> layout_a = {}, std::vector<size_t> layout_b = {}, std::vector<size_t> layout_c = {});
Brgemm(const Output<Node>& A,
const Output<Node>& B,
const size_t offset_a = 0lu,
const size_t offset_b = 0lu,
const size_t offset_c = 0lu,
VectorDims layout_a = {},
VectorDims layout_b = {},
VectorDims layout_c = {});
Brgemm(const Output<Node>& A,
const Output<Node>& B,
const PortDescriptor& desc_a,
const PortDescriptor& desc_b,
const PortDescriptor& desc_c,
VectorDims layout_a = {},
VectorDims layout_b = {},
VectorDims layout_c = {});
Brgemm() = default;

size_t get_offset_a() const { return get_input_offset(0); }
@@ -47,9 +57,9 @@ class Brgemm : virtual public modifier::MemoryAccess, public ov::op::Op {
ov::PartialShape get_planar_output_shape(const ov::PartialShape& output_shape) const;

private:
void custom_constructor_validate_and_infer_types(std::vector<size_t> layout_a, std::vector<size_t> layout_b, std::vector<size_t> layout_c);
void custom_constructor_validate_and_infer_types(VectorDims layout_a, VectorDims layout_b, VectorDims layout_c);
};

} // namespace op
} // namespace snippets
} // namespace ov
} // namespace ov
7 changes: 4 additions & 3 deletions src/common/snippets/include/snippets/op/load.hpp
@@ -50,7 +50,7 @@ class Load : public modifier::MemoryAccess, public ov::op::Op {
class LoadReorder : public Load {
public:
OPENVINO_OP("LoadReorder", "SnippetsOpset", Load);
LoadReorder(const Output<Node>& x, size_t count = 1lu, const size_t offset = 0lu, std::vector<size_t> order = {});
LoadReorder(const Output<Node>& x, size_t count = 1lu, const size_t offset = 0lu, VectorDims order = {});
LoadReorder() = default;

void set_offset(size_t offset) { set_output_offset(offset, 0); }
@@ -61,15 +61,16 @@ class LoadReorder : public Load {
void validate_and_infer_types() override;

class ShapeInfer : public IShapeInferSnippets {
std::vector<size_t> m_order;
VectorDims m_order;

public:
explicit ShapeInfer(const std::shared_ptr<ov::Node>& n);
Result infer(const std::vector<VectorDimsRef>& input_shapes) override;
};


protected:
std::vector<size_t> m_order;
VectorDims m_order;
};
} // namespace op
} // namespace snippets
7 changes: 4 additions & 3 deletions src/common/snippets/include/snippets/op/reorder.hpp
@@ -21,21 +21,22 @@ class Reorder : public ShapeInferOp {
public:
OPENVINO_OP("Reorder", "SnippetsOpset", ShapeInferOp);
Reorder() = default;
Reorder(const Output<Node>& x, std::vector<size_t> order);
Reorder(const Output<Node>& x, VectorDims order);

bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;

class ShapeInfer : public IShapeInferSnippets {
std::vector<size_t> m_target_order {};
VectorDims m_target_order{};

public:
explicit ShapeInfer(const std::shared_ptr<Node>& n);
Result infer(const std::vector<VectorDimsRef>& input_shapes) override;
};

private:
void custom_constructor_validate_and_infer_types(std::vector<size_t> order);
void custom_constructor_validate_and_infer_types(VectorDims order);
};

} // namespace op
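Given the m_target_order member, Reorder's shape inference presumably permutes the input dimensions according to the stored order; the exact semantics are not shown in this hunk, so the following permutation is only a hedged sketch with illustrative names:

// Sketch: applying a dimension order such as {0, 2, 3, 1} to a shape.
#include <cstddef>
#include <vector>

std::vector<std::size_t> apply_order(const std::vector<std::size_t>& shape,
                                     const std::vector<std::size_t>& order) {
    std::vector<std::size_t> out(order.size());
    for (std::size_t i = 0; i < order.size(); ++i) {
        out[i] = shape[order[i]];  // pick the source dimension for each output slot
    }
    return out;
}
// apply_order({1, 3, 8, 16}, {0, 2, 3, 1}) == {1, 8, 16, 3}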
@@ -47,7 +47,7 @@ class SplitDimensionM: public CommonOptimizations::SubgraphPass {
* @param m_index M dimension index
* @return updated order with the split M dimension
*/
static std::vector<size_t> get_updated_order(const std::vector<size_t>& order, size_t m_index);
static VectorDims get_updated_order(const VectorDims& order, size_t m_index);
/**
* @brief Reshapes m dimension in "shape": separates M in two parts: "batch_m_dim" and "new_m_dim"
* @param shape Shape to split
@@ -186,7 +186,7 @@ class RuntimeConfigurator {
/**
* @brief Extract layouts from m_io_descs
*/
std::vector<std::vector<size_t>> extract_layouts() const;
std::vector<VectorDims> extract_layouts() const;

std::shared_ptr<RuntimeConfig> m_config = nullptr;

@@ -62,7 +62,8 @@ class HorizonOpShapeInfer : public IShapeInferSnippets {
};

class BrgemmShapeInfer : public IShapeInferSnippets {
std::vector<std::vector<size_t>> m_io_layouts;
std::vector<VectorDims> m_io_layouts;

public:
explicit BrgemmShapeInfer(const std::shared_ptr<Node>& n);
Result infer(const std::vector<VectorDimsRef>& input_shapes) override;