Skip to content

Commit 2ea382a

Browse files
committed
Introduce SinkHolder to keep nodes that do not return any outputs
1 parent a161186 commit 2ea382a

File tree

3 files changed

+63
-9
lines changed

3 files changed

+63
-9
lines changed

src/bindings/python/src/openvino/frontend/pytorch/inlined_extension.py

Lines changed: 22 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -23,15 +23,22 @@ def __init__(self, *args):
2323
# TODO: What about attributes?
2424
super().__init__(self, args)
2525
self.attrs = {"id": global_counter_id} # `id` attribute distinguishes different instances of the same class, we need it because different instances may have different behaviour
26-
# print(f'Made custom op class with id = {self.attrs["id"]}')
27-
# print(f"Input signature: {input_signature}")
28-
# print(f"Output signature: {output_signature}")
26+
#print('output_signature from ctor:', output_signature)
27+
if output_signature == ():
28+
# The operation doesn't have outputs, so we need to take extra care to avoid eliminating the op from the graph
29+
#print('===================== MARKING AS SINK ========================')
30+
self.get_rt_info()['__sink__'] = True
31+
#print(f'Made custom op class with id = {self.attrs["id"]}')
32+
#print(f"Input signature: {input_signature}")
33+
#print(f"Output signature: {output_signature}")
2934
self.constructor_validate_and_infer_types()
3035

3136
def evaluate(self, outputs, inputs):
3237
# print("called evaluate")
3338
inputs_torch = (torch.from_numpy(input.data) for input in inputs) # TODO: Check memory sharing
3439
result = func(*inputs_torch)
40+
if result is None:
41+
result = ()
3542
if not isinstance(result, tuple):
3643
result = (result,)
3744
for i, tensor in enumerate(result):
@@ -47,8 +54,13 @@ def visit_attributes(self, visitor):
4754

4855
def validate_and_infer_types(self):
4956
#TODO: Validate input signature
50-
for i, output in enumerate(output_signature):
51-
self.set_output_type(i, output[0], output[1])
57+
if output_signature == ():
58+
# Even when the original wrapped function doesn't give any return value, we need to set some output type to avoid eliminating the op from the graph
59+
# Data type and shape don't matter, so we set a default empty tensor of type u8
60+
self.set_output_type(0, ov.Type.u8, ov.PartialShape([0]))
61+
else:
62+
for i, output in enumerate(output_signature):
63+
self.set_output_type(i, output[0], output[1])
5264
global_counter_id += 1
5365
return InlinedCustomOp
5466

@@ -71,6 +83,7 @@ def make_input_signature(args, kwargs):
7183
def make_output_signature(args):
7284
if args is None:
7385
# TODO: This case is not really supported by PT FE -- because we don't support ops that do not have outputs, they will be lost
86+
#print('=================== None PROCESSING ======================')
7487
args = ()
7588
if not isinstance(args, tuple):
7689
args = (args,)
@@ -108,6 +121,7 @@ def forward(ctx, *call_args, **call_kwargs): #TODO: what is `ctx`?
108121
result = func_target(*call_args, **call_kwargs)
109122
if not op:
110123
output_signature = make_output_signature(result)
124+
#print('about to make custom op class with output signature', output_signature)
111125
__class__.op = make_custom_op_class(func_target, input_signature, output_signature)
112126
else:
113127
__class__.op = op
@@ -129,7 +143,9 @@ def trampoline(*args, **kwargs):
129143
# It is required because `func` is fused inside Trampoline class and can have different behaviour from call to call in PyTorch world even if
130144
# the same op is specified to wrap multiple different functions.
131145
trampoline = make_trampoline_class(func, op, op_attrs)
132-
return trampoline.apply(*args, **kwargs)
146+
result = trampoline.apply(*args, **kwargs)
147+
#print('just called trampoline with result:', result)
148+
return result
133149
return trampoline
134150

135151
if len(args) == 1 and callable(args[0]) and not (isinstance(args[0], type) and issubclass(args[0], ov.Op)):

src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -635,6 +635,11 @@ def convert(self, node_context):
635635
if op_extension := self.get_op_extension():
636636
trampoline, target_extension = op_extension
637637
assert isinstance(target_extension, InlineConversionExtension)
638-
result = trampoline.convert(node_context)
639-
return result
638+
try:
639+
return trampoline.convert(node_context)
640+
except Exception as e:
641+
print('[ ERROR ] Exception happened during calling of custom converter for PyTorch operation')
642+
print(' PyTorch Script code:', self.graph_element)
643+
print(' Exception:', e)
644+
raise
640645
assert False, "PyTorch FrontEnd Internal Error: `converter` method of TorchScriptPythonDecoder is called for node that has no custom converter"

src/frontends/pytorch/src/translate_session.cpp

Lines changed: 34 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,32 @@ std::shared_ptr<ov::Model> TranslateSession::get_converted_model() {
4545
return m_ov_model;
4646
}
4747

48+
/// @brief Sink operation that preserves another node from removing from the graph, when it is not possible to derive that target node op from Sink class
49+
class SinkHolder : public Sink {
50+
public:
51+
OPENVINO_OP("SinkHolder", "util", ov::op::Sink);
52+
SinkHolder() = default;
53+
explicit SinkHolder(const OutputVector& arguments) : Sink(arguments) {}
54+
55+
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override {
56+
check_new_args_count(this, new_args);
57+
return std::make_shared<SinkHolder>(new_args);
58+
}
59+
60+
void validate_and_infer_types() override {
61+
set_output_type(0, element::u8, PartialShape({0}));
62+
}
63+
64+
bool evaluate(ov::TensorVector& output_values, const ov::TensorVector& input_values) const override {
65+
// This op has a dummy output -- no need to evaluate
66+
return true;
67+
}
68+
69+
bool has_evaluate() const override {
70+
return true;
71+
}
72+
};
73+
4874
std::shared_ptr<ov::Model> TranslateSession::translate_graph(const ov::frontend::InputModel::Ptr& input_model) {
4975
auto pytorch_model = std::dynamic_pointer_cast<pytorch::InputModel>(input_model);
5076
FRONT_END_GENERAL_CHECK(pytorch_model != nullptr, "Invalid input model");
@@ -88,6 +114,7 @@ std::shared_ptr<Model> TranslateSession::convert_pytorch_model(
88114
auto tensor_map = std::make_shared<TensorMap>(); // tensor map of the current context
89115
auto mutated_tensors = std::make_shared<std::set<size_t>>();
90116
std::vector<size_t> inserted_params;
117+
SinkVector sinks;
91118

92119
if (input_model && input_model->m_requested_places.size() == 0) {
93120
// When we have input model we should use its inputs order to create Parameters
@@ -179,6 +206,12 @@ std::shared_ptr<Model> TranslateSession::convert_pytorch_model(
179206

180207
const bool has_inputs = !node->inputs().empty();
181208
const size_t in_tensor_id = has_inputs ? node->inputs().at(0) : 0;
209+
if (converted_outputs.size() == 1) {
210+
// Check if converted node is marked as a sink
211+
if(converted_outputs.front().get_node()->get_rt_info().count("__sink__")) {
212+
sinks.push_back(std::make_shared<SinkHolder>(converted_outputs));
213+
}
214+
}
182215
for (size_t i = 0; i < fw_outputs.size(); ++i) {
183216
size_t fw_tensor_id = node->output(i);
184217
if (has_inputs && node->may_produce_alias(0, i)) {
@@ -303,7 +336,7 @@ std::shared_ptr<Model> TranslateSession::convert_pytorch_model(
303336
}),
304337
parameters->end());
305338
}
306-
resulting_model = std::make_shared<Model>(results, *parameters);
339+
resulting_model = std::make_shared<Model>(results, sinks, *parameters);
307340
// Did a conversion in a nested scope to automatically remove any holders of nodes except those in the graph
308341
}
309342

0 commit comments

Comments
 (0)