Skip to content

Commit 510ea8a

Browse files
committed
Debugging
1 parent 6e27267 commit 510ea8a

File tree

4 files changed

+88
-21
lines changed

4 files changed

+88
-21
lines changed

src/core/include/openvino/op/constant.hpp

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -439,6 +439,18 @@ class OPENVINO_API Constant : public Op {
439439
/// @return Constant's strides in bytes.
440440
const Strides& get_strides() const;
441441

442+
/// @brief Sets the external name for the constant.
443+
/// @param name The external name to set.
444+
void set_external_name(const std::string& name) {
445+
m_external_name = name;
446+
}
447+
448+
/// @brief Gets the external name of the constant.
449+
/// @return The external name.
450+
const std::string& get_external_name() const {
451+
return m_external_name;
452+
}
453+
442454
private:
443455
Constant(bool memset_allocation, const element::Type& type, const Shape& shape);
444456

@@ -796,6 +808,9 @@ class OPENVINO_API Constant : public Op {
796808
mutable std::atomic_bool m_all_elements_bitwise_identical{false};
797809
mutable std::atomic_bool m_all_elements_bitwise_identical_checked{false};
798810
bool m_alloc_buffer_on_visit_attributes{true};
811+
812+
// TODO: Move to dedicated ExternalConstant op:
813+
std::string m_external_name;
799814
};
800815

801816
template <>

src/core/src/op/constant.cpp

Lines changed: 36 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -324,7 +324,8 @@ Constant::Constant(const Constant& other)
324324
m_data{other.m_data},
325325
m_all_elements_bitwise_identical{other.m_all_elements_bitwise_identical.load()},
326326
m_all_elements_bitwise_identical_checked{other.m_all_elements_bitwise_identical_checked.load()},
327-
m_alloc_buffer_on_visit_attributes{other.m_alloc_buffer_on_visit_attributes} {
327+
m_alloc_buffer_on_visit_attributes{other.m_alloc_buffer_on_visit_attributes},
328+
m_external_name{other.m_external_name} {
328329
constructor_validate_and_infer_types();
329330
}
330331

@@ -591,32 +592,46 @@ bool Constant::visit_attributes(AttributeVisitor& visitor) {
591592
OV_OP_SCOPE(v0_Constant_visit_attributes);
592593
const auto prev_shape = m_shape;
593594
const auto prev_type = m_element_type;
595+
const auto prev_external_name = m_external_name;
594596
visitor.on_attribute("element_type", m_element_type);
595597
visitor.on_attribute("shape", m_shape);
598+
visitor.on_attribute("external_name", m_external_name);
596599

597-
const auto need_to_reallocate = (m_shape != prev_shape) || (prev_type != m_element_type);
598-
const auto is_string_constant = (m_element_type == element::string);
599-
if (m_alloc_buffer_on_visit_attributes && need_to_reallocate) {
600-
// string objects initialization is required, others filling in a fresh constant
601-
allocate_buffer(is_string_constant);
602-
}
603-
604-
if (is_string_constant) {
605-
if (auto string_aligned_buffer = std::dynamic_pointer_cast<ov::StringAlignedBuffer>(m_data)) {
606-
visitor.on_attribute("value", string_aligned_buffer);
607-
} else if (auto shared_string_tensor = std::dynamic_pointer_cast<ov::SharedBuffer<ov::Tensor>>(m_data)) {
608-
auto shared_string_buffer =
609-
std::make_shared<ov::SharedStringAlignedBuffer>(shared_string_tensor->get_ptr<char>(),
610-
shared_string_tensor->size());
611-
visitor.on_attribute("value", shared_string_buffer);
600+
if(!m_external_name.empty()) {
601+
if(prev_external_name != m_external_name) {
602+
// If it becomes an external constant or changes its content, it is unlinked
603+
std::cerr << "[ DEBUG ] ExternalConstant in Constant::visit_attributes is activated" << std::endl;
604+
m_data.reset();
612605
} else {
613-
// deserialization case when buffer does not exist yet
614-
std::shared_ptr<ov::StringAlignedBuffer> string_aligned_buffer;
615-
visitor.on_attribute("value", string_aligned_buffer);
616-
m_data = string_aligned_buffer;
606+
std::cerr << "[ DEBUG ] Retrieving value from Constant::visit_attributes" << std::endl;
607+
std::cerr << " m_external_name = " << m_external_name << std::endl;
608+
std::cerr << " Skip value retrieving" << std::endl;
617609
}
618610
} else {
619-
visitor.on_attribute("value", m_data);
611+
const auto need_to_reallocate = (m_shape != prev_shape) || (prev_type != m_element_type);
612+
const auto is_string_constant = (m_element_type == element::string);
613+
if (m_alloc_buffer_on_visit_attributes && need_to_reallocate) {
614+
// string objects initialization is required, others filling in a fresh constant
615+
allocate_buffer(is_string_constant);
616+
}
617+
618+
if (is_string_constant) {
619+
if (auto string_aligned_buffer = std::dynamic_pointer_cast<ov::StringAlignedBuffer>(m_data)) {
620+
visitor.on_attribute("value", string_aligned_buffer);
621+
} else if (auto shared_string_tensor = std::dynamic_pointer_cast<ov::SharedBuffer<ov::Tensor>>(m_data)) {
622+
auto shared_string_buffer =
623+
std::make_shared<ov::SharedStringAlignedBuffer>(shared_string_tensor->get_ptr<char>(),
624+
shared_string_tensor->size());
625+
visitor.on_attribute("value", shared_string_buffer);
626+
} else {
627+
// deserialization case when buffer does not exist yet
628+
std::shared_ptr<ov::StringAlignedBuffer> string_aligned_buffer;
629+
visitor.on_attribute("value", string_aligned_buffer);
630+
m_data = string_aligned_buffer;
631+
}
632+
} else {
633+
visitor.on_attribute("value", m_data);
634+
}
620635
}
621636
update_identical_flags(false, false);
622637
return true;

src/core/src/pass/serialize.cpp

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -582,6 +582,8 @@ class XmlSerializer : public ov::AttributeVisitor {
582582
} else if (const auto& a = ov::as_type<ov::AttributeAdapter<std::shared_ptr<ov::AlignedBuffer>>>(&adapter)) {
583583
if (name == "value" && translate_type_name(m_node_type_name) == "Const") {
584584
const int64_t size = a->get()->size();
585+
if(size > 100000)
586+
std::cerr << "[ DEBUG ] SERIALIZE value of big size: " << size << std::endl;
585587
size_t new_size;
586588
int64_t offset = m_constant_write_handler.write(static_cast<const char*>(a->get()->get_ptr()),
587589
size,
@@ -1010,6 +1012,9 @@ void ngfunction_2_ir(pugi::xml_node& netXml,
10101012

10111013
auto sorted_ops = model.get_ordered_ops();
10121014

1015+
int iii = 0;
1016+
std::cerr << "DEBUG " << iii++ << std::endl;
1017+
10131018
// get_ordered_ops() returns operations after a topological sort. The topological sort reverses order of Parameters
10141019
// and Results. So we need to put them into sorted_ops separately to ensure correct order of inputs and outputs.
10151020
{
@@ -1033,7 +1038,12 @@ void ngfunction_2_ir(pugi::xml_node& netXml,
10331038
sorted_ops = std::move(result);
10341039
}
10351040

1041+
std::cerr << "DEBUG " << iii++ << std::endl;
1042+
int jjj = 0;
1043+
10361044
for (const auto& n : sorted_ops) {
1045+
1046+
std::cerr << jjj++ << std::endl;
10371047
ov::Node* node = n.get();
10381048
int node_id{};
10391049
{
@@ -1214,6 +1224,8 @@ void ngfunction_2_ir(pugi::xml_node& netXml,
12141224
layer.remove_child(data);
12151225
}
12161226
}
1227+
std::cerr << "DEBUG " << iii++ << std::endl;
1228+
12171229
// <edges>
12181230
const std::vector<Edge> edge_mapping = create_edge_mapping(layer_ids, model);
12191231
pugi::xml_node edges = netXml.append_child("edges");
@@ -1233,6 +1245,7 @@ void ngfunction_2_ir(pugi::xml_node& netXml,
12331245
edge.append_attribute("to-port").set_value(e.to_port);
12341246
}
12351247

1248+
std::cerr << "DEBUG " << iii++ << std::endl;
12361249
// Serialize rt info
12371250
pugi::xml_node rt_info_node = netXml.append_child("rt_info");
12381251
for (const auto& it : model.get_rt_info()) {
@@ -1241,6 +1254,7 @@ void ngfunction_2_ir(pugi::xml_node& netXml,
12411254
continue;
12421255
serialize_rt_info(rt_info_node, it.first, it.second);
12431256
}
1257+
std::cerr << "DEBUG " << iii++ << std::endl;
12441258
}
12451259

12461260
std::string valid_xml_path(const std::string& path) {

src/frontends/pytorch/src/op/linear.cpp

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,11 +31,28 @@ OutputVector translate_linear(const NodeContext& context) {
3131
return {matmul};
3232
};
3333

34+
// Strips the leading "self." qualifier that TorchScript puts on state-dict entry names.
// Throws a frontend conversion error (same message as before) when the prefix is absent.
std::string demangle_dict_state_name(const std::string& name) {
    static const std::string prefix = "self.";
    const bool has_prefix = name.compare(0, prefix.size(), prefix) == 0;
    FRONT_END_OP_CONVERSION_CHECK(has_prefix, "Name does not start with 'self.' prefix: " + name);
    return name.substr(prefix.size());
}
41+
3442
OutputVector translate_linear_ext(const NodeContext& context) {
3543
num_inputs_check(context, 2, 3);
44+
// std::cerr << "[ DEBUG ] translate_linear_ext" << std::endl;
3645
auto x = context.get_input(0);
3746
auto initial_x = x;
3847
auto weight = context.get_input(1);
48+
auto weight_node = weight.get_node_shared_ptr();
49+
std::cerr << "[ DEBUG ] Name of weight layer: " << weight_node->get_friendly_name() << std::endl;
50+
// std::cerr << "[ DEBUG ] Name of weight tensor: " << weight.get_any_name() << std::endl;
51+
if (auto constant_node = ov::as_type_ptr<v0::Constant>(weight_node)) {
52+
auto name = demangle_dict_state_name(weight_node->get_friendly_name());
53+
std::cerr << " [ DEBUG ] Set external constant name: " << name << std::endl;
54+
constant_node->set_external_name(name);
55+
}
3956
bool convert_back = false;
4057
if (weight.get_element_type() != element::f32) {
4158
// In case of patched linear it can have mixed fp16/bf16 and fp32 input type.
@@ -48,9 +65,15 @@ OutputVector translate_linear_ext(const NodeContext& context) {
4865
}
4966
}
5067
auto matmul = context.mark_node(std::make_shared<v0::MatMul>(x, weight, false, true));
68+
// std::cerr << "[ DEBUG ] Name of matmul layer: " << matmul->get_friendly_name() << std::endl;
5169
if (!context.input_is_none(2)) {
70+
std::cerr << "[ DEBUG ] There is bias addon" << std::endl;
5271
auto bias = context.get_input(2);
5372

73+
auto bias_node = weight.get_node_shared_ptr();
74+
std::cerr << "[ DEBUG ] Name of bias layer: " << bias_node->get_friendly_name() << std::endl;
75+
76+
5477
if (bias.get_element_type() != element::f32) {
5578
// Same reason as for weight.
5679
bias = context.mark_node(std::make_shared<v0::Convert>(bias, element::f32));

0 commit comments

Comments
 (0)