|
| 1 | +# Copyright (C) 2018-2025 Intel Corporation |
| 2 | +# SPDX-License-Identifier: Apache-2.0 |
| 3 | + |
| 4 | +# flake8: noqa |
| 5 | +# mypy: ignore-errors |
| 6 | + |
| 7 | +import inspect |
| 8 | +from openvino.frontend.pytorch.ts_decoder import InlineConversionExtension |
| 9 | +from openvino.frontend.pytorch.utils import pt_to_ov_type_map |
| 10 | +import openvino as ov |
| 11 | + |
# Monotonically increasing id consumed by make_custom_op_class: each generated
# op class/instance is tagged with the current value (stored in its "id"
# attribute) so ops produced from different wrapped functions can be told apart.
global_counter_id = 0
| 13 | + |
# makes a custom op class from a func and input/output signatures
def make_custom_op_class(func, input_signature, output_signature):
    """Dynamically build an ``ov.Op`` subclass wrapping the Python callable ``func``.

    The generated op evaluates by converting its OpenVINO input tensors to torch
    tensors, calling ``func`` on them, and copying the results back into the
    output tensors.  Output element types and (rank-only) shapes come from
    ``output_signature``, a sequence of ``(element_type, partial_shape)`` pairs.
    Each call to this factory consumes one value of ``global_counter_id``.
    """
    import torch, numpy
    global global_counter_id
    # print('make_custom_op_class, id =', global_counter_id)
    class InlinedCustomOp(ov.Op):
        class_type_info = ov.runtime.DiscreteTypeInfo("InlinedCustomOp", "extension")

        def __init__(self, *args):
            # TODO: What about attributes?
            # NOTE(review): passing `self` explicitly appears to follow the
            # OpenVINO Python custom-op binding convention — confirm against ov.Op docs.
            super().__init__(self, args)
            self.attrs = {"id": global_counter_id} # `id` attribute distinguishes different instances of the same class, we need it because different instances may have different behaviour
            # print(f'Made custom op class with id = {self.attrs["id"]}')
            # print(f"Input signature: {input_signature}")
            # print(f"Output signature: {output_signature}")
            self.constructor_validate_and_infer_types()

        def evaluate(self, outputs, inputs):
            """Run ``func`` eagerly on the input tensors and copy results into ``outputs``."""
            # print("called evaluate")
            inputs_torch = (torch.from_numpy(input.data) for input in inputs) # TODO: Check memory sharing
            result = func(*inputs_torch)
            # Normalize a single-tensor result to a 1-tuple so outputs can be filled uniformly.
            if not isinstance(result, tuple):
                result = (result,)
            for i, tensor in enumerate(result):
                ov.Tensor(numpy.array(tensor), shared_memory=True).copy_to(outputs[i]) # TODO: set the output tensor directly without copying
            return True

        def has_evaluate(self, *args):
            # Advertise that this op supports direct computation via `evaluate`.
            return True

        def visit_attributes(self, visitor):
            # Expose the distinguishing "id" attribute to serialization/visiting.
            visitor.on_attributes(self.attrs)
            return True

        def validate_and_infer_types(self):
            #TODO: Validate input signature
            # Output types/shapes are taken verbatim from the captured output_signature.
            for i, output in enumerate(output_signature):
                self.set_output_type(i, output[0], output[1])
    global_counter_id += 1
    return InlinedCustomOp
| 54 | + |
| 55 | + |
def make_signature(args):
    # TODO: Extend beyond just tensors
    """Map each torch.Tensor in ``args`` to an OpenVINO ``(element_type, partial_shape)`` pair.

    Only the rank of each tensor is preserved; every dimension is made dynamic.
    """
    signature = []
    for tensor in args:
        element_type = pt_to_ov_type_map[str(tensor.dtype)]
        rank_only_shape = ov.PartialShape.dynamic(len(tensor.shape))
        signature.append((element_type, rank_only_shape))
    return tuple(signature)
| 60 | + |
| 61 | + |
# Returns a tuple of tuples (element_type, partial_shape) for each argument, flattening nested structures if needed, setting all dimensions dynamic preserving rank
# Currently assumes that all input arguments are torch.Tensor objects
def make_input_signature(args, kwargs):
    """Build the OpenVINO input signature for a wrapped function call.

    :param args: positional torch.Tensor arguments of the call.
    :param kwargs: keyword arguments of the call; must currently be empty.
    :raises NotImplementedError: if any keyword arguments are supplied.
    """
    # TODO: Avoid the current limitation: kwargs parameters should be passed in the same order as the function signature without gaps
    # flatten kwargs relying on the order of the keys
    # Raise explicitly instead of `assert`: asserts are stripped under
    # `python -O`, which would let unsupported kwargs slip through silently.
    if kwargs:
        raise NotImplementedError("Keyword arguments are not supported yet")
    return make_signature(args + tuple(kwargs.values()))
| 69 | + |
| 70 | + |
def make_output_signature(args):
    """Normalize a wrapped function's return value(s) into a signature tuple.

    ``None`` (no outputs) yields an empty signature; a single tensor is treated
    as a 1-tuple of outputs before delegating to ``make_signature``.
    """
    if args is None:
        # TODO: This case is not really supported by PT FE -- because we don't support ops that do not have outputs, they will be lost
        return make_signature(())
    outputs = args if isinstance(args, tuple) else (args,)
    return make_signature(outputs)
| 78 | + |
| 79 | + |
def is_class_method(obj):
    """Heuristically detect an (unbound) instance method.

    Returns True when ``obj`` is a function or method whose first positional
    parameter is named ``self``; False for anything else (including
    non-callables and builtins).
    """
    if not (inspect.isfunction(obj) or inspect.ismethod(obj)):
        return False
    params = inspect.getfullargspec(obj).args
    return bool(params) and params[0] == 'self'
| 88 | + |
| 89 | + |
def make_trampoline_class(func, op, op_attrs):
    """Create a ``torch.autograd.Function`` subclass bridging ``func`` into OpenVINO.

    ``forward`` executes ``func`` eagerly (so normal PyTorch execution and
    jit.trace still see real results) while recording the input/output
    signatures; ``convert`` is used later by the PyTorch frontend to emit the
    corresponding OpenVINO op.  When ``op`` is None, a custom op class is
    generated on the fly from the observed signatures.
    """
    import torch
    class Trampoline(torch.autograd.Function):
        target_extension = InlineConversionExtension() # this is a marker for this type of extension

        # This function defines how the operation behaves when called as a part of PyTorch model code in eager execution or while jit.trace
        @staticmethod
        def forward(ctx, *call_args, **call_kwargs): #TODO: what is `ctx`?
            # print('Called through the trampoline')
            func_target = func
            if not op:
                if is_class_method(func):
                    # Method case: the first positional argument is `self`; bind it
                    # into a wrapper so the generated op can call `func` without it.
                    self_obj = call_args[0]
                    call_args = call_args[1:]
                    wrapped = lambda *distil_args, **distil_kwargs: func(self_obj, *distil_args, **distil_kwargs)
                    func_target = wrapped
                input_signature = make_input_signature(call_args, call_kwargs)
            # TODO: Try to trace `func` with the hope to obtain tracable shapes to build more precise `validate_and_infer_types` automatically (unlikely possible)
            result = func_target(*call_args, **call_kwargs)
            if not op:
                output_signature = make_output_signature(result)
                # `__class__` resolves to Trampoline via the class cell; the op class
                # is stashed on the trampoline type for `convert` to pick up later.
                __class__.op = make_custom_op_class(func_target, input_signature, output_signature)
            else:
                __class__.op = op
            return result

        # This function defines how the operation is represented in OpenVINO model graph
        @staticmethod
        def convert(node_context):
            # Instantiate the stored op class on the converted inputs and return its outputs.
            inputs = [node_context.get_input(i) for i in range(node_context.get_input_size())]
            return __class__.op(*inputs, **op_attrs).outputs()

    return Trampoline
| 123 | + |
| 124 | + |
def inlined_extension(*args, **op_attrs):
    """Decorator that inlines a Python function as a custom OpenVINO operation.

    Supported forms:
      * ``@inlined_extension`` -- decorate directly; an op class is generated
        from the call signatures observed at runtime.
      * ``@inlined_extension(OpClass, attr=...)`` -- use ``OpClass`` (an
        ``ov.Op`` subclass) to represent the function in the graph.
      * ``@inlined_extension(attr=...)`` -- keyword attributes only; an op
        class is generated as in the first form.
    """
    def make_trampoline(func, op=None):
        def trampoline(*call_args, **call_kwargs):
            # Keep trampoline class creation at the point when the function is called to make each time a new trampoline.
            # It is required because `func` is fused inside Trampoline class and can have different behaviour from call to call in PyTorch world even if
            # the same op is specified to wrap multiple different functions.
            trampoline_class = make_trampoline_class(func, op, op_attrs)
            return trampoline_class.apply(*call_args, **call_kwargs)
        return trampoline

    # Direct decoration: exactly one callable that is not an ov.Op subclass.
    if len(args) == 1 and callable(args[0]) and not (isinstance(args[0], type) and issubclass(args[0], ov.Op)):
        return make_trampoline(args[0])
    # Fix: support `@inlined_extension(attr=...)` with no positional op argument —
    # the original unconditionally read args[0] and raised IndexError here.
    op = args[0] if args else None
    return lambda func: make_trampoline(func, op)