forked from pytorch/pytorch
-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathruntime_compatibility.cpp
56 lines (48 loc) · 1.69 KB
/
runtime_compatibility.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
#include <ATen/core/dispatch/Dispatcher.h>
#include <caffe2/serialize/inline_container.h>
#include <torch/csrc/jit/mobile/runtime_compatibility.h>
#include <torch/csrc/jit/runtime/operator.h>
namespace torch {
namespace jit {
/*
 * Reports the newest bytecode format version this runtime can execute.
 * The serializer owns the constant; this merely surfaces it.
 */
uint64_t _get_runtime_bytecode_version() {
  const uint64_t max_supported_version =
      caffe2::serialize::kMaxSupportedBytecodeVersion;
  return max_supported_version;
}
/*
 * Returns all registered PyTorch ops and their versioning info.
 *
 * The result maps the fully-qualified operator name ("name" or
 * "name.overload") to an OperatorInfo carrying the schema argument count.
 * JIT-registry operators always have a schema, so their count is always
 * populated; dispatcher-registered ops may lack one, in which case
 * num_schema_args is left unset.
 */
std::unordered_map<std::string, OperatorInfo> _get_runtime_ops_and_info() {
  std::unordered_map<std::string, OperatorInfo> result;
  // Grab the jit operators
  const auto nonDispatcherOperators = torch::jit::getAllOperators();
  for (const auto& full_op : nonDispatcherOperators) {
    // Bind by const reference: FunctionSchema is a heavyweight object and
    // copying it once per operator is pure waste.
    const auto& op = full_op->schema();
    const int num_schema_args = static_cast<int>(op.arguments().size());
    auto op_name = op.name();
    if (!op.overload_name().empty()) {
      op_name += ("." + op.overload_name());
    }
    result.emplace(std::move(op_name), OperatorInfo{num_schema_args});
  }
  // Grab the dispatcher operators
  const auto dispatcherOperators = c10::Dispatcher::singleton().getAllOpNames();
  for (const auto& op : dispatcherOperators) {
    // grab schema
    const auto op_handle = c10::Dispatcher::singleton().findOp(op);
    c10::optional<int> num_schema_args;
    // findOp() returns an optional handle. Names from getAllOpNames()
    // should always resolve, but guard anyway rather than dereference an
    // empty optional (undefined behavior) if a name goes stale.
    if (op_handle && op_handle->hasSchema()) {
      num_schema_args =
          static_cast<int>(op_handle->schema().arguments().size());
    }
    auto op_name = op.name;
    if (!op.overload_name.empty()) {
      op_name += ("." + op.overload_name);
    }
    result.emplace(std::move(op_name), OperatorInfo{num_schema_args});
  }
  return result;
}
/*
 * Bundles the runtime bytecode version and the full operator table into a
 * single compatibility snapshot for callers to compare against a model.
 */
RuntimeCompatibilityInfo RuntimeCompatibilityInfo::get() {
  // Build into a named local; NRVO elides the copy on return.
  RuntimeCompatibilityInfo info{
      _get_runtime_bytecode_version(), _get_runtime_ops_and_info()};
  return info;
}
} // namespace jit
} // namespace torch