Commit ef9e864

Gasoonjia authored and facebook-github-bot committed

turn on dim order in cadence test (#7756)

Summary: This diff turns on dim order in the cadence tests. We also work around the `to_copy` operator in the verifier to keep the verifier check enabled.

Reviewed By: digantdesai, mcremon-meta

Differential Revision: D68246404

1 parent: e78ed83
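For context (an editor's illustration, not part of the commit): a tensor's dim order describes how its dimensions are laid out in memory. The sequential order [0, 1, ..., n-1] is the ordinary contiguous layout, while e.g. [0, 2, 3, 1] corresponds to channels-last. A rough sketch in plain PyTorch, recovering the dim order from strides:

import torch

def dim_order(t: torch.Tensor) -> list:
    # dims sorted by descending stride give the memory layout order
    return sorted(range(t.dim()), key=lambda d: -t.stride(d))

x = torch.randn(1, 3, 8, 8)                   # NCHW, contiguous
cl = x.to(memory_format=torch.channels_last)  # NHWC in memory

print(dim_order(x))   # [0, 1, 2, 3] -- sequential (contiguous)
print(dim_order(cl))  # [0, 2, 3, 1] -- channels-last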

File tree: 3 files changed (+48, -1)

backends/cadence/aot/compiler.py (+5, -1)
@@ -33,6 +33,7 @@
     ExecutorchProgramManager,
     to_edge,
 )
+from executorch.exir.dialects._ops import ops as exir_ops
 from executorch.exir.pass_base import PassResult
 from executorch.exir.passes import ToOutVarPass
 from executorch.exir.passes.sym_shape_eval_pass import HintBasedSymShapeEvalPass
@@ -186,14 +187,17 @@ def export_to_edge(
     edge_prog_manager = to_edge(
         expo_program,
         compile_config=EdgeCompileConfig(
-            _skip_dim_order=True,
             # Allow specific non-core aten ops in the IR.
             _core_aten_ops_exception_list=[
                 torch.ops.aten._native_batch_norm_legit_functional.default,
                 torch.ops.aten.linear.default,
                 torch.ops.aten.linalg_vector_norm.default,
                 torch.ops.aten.unfold.default,
                 torch.ops.aten.angle.default,
+                # cadence replaced to_dim_order_copy with _to_copy for performance
+                # skip _to_copy op to get around the dim order check
+                # We should remove this op once cadence can support dim order
+                exir_ops.edge.aten._to_copy.default,
             ],
         ),
         constant_methods=constant_methods,
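For readers unfamiliar with the exception list: a minimal sketch of how `_core_aten_ops_exception_list` reaches `to_edge` (the toy module here is hypothetical; the real call site is `export_to_edge` above). Ops on the list are exempted from the core-ATen verifier check, which is how the diff lets `_to_copy` through while keeping the verifier on:

import torch
from executorch.exir import EdgeCompileConfig, to_edge

class Scale(torch.nn.Module):  # hypothetical toy module
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * 2.0

ep = torch.export.export(Scale(), (torch.ones(4),))
edge = to_edge(
    ep,
    compile_config=EdgeCompileConfig(
        # ops listed here are exempt from the core-ATen verifier check
        _core_aten_ops_exception_list=[torch.ops.aten.linear.default],
    ),
)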

backends/cadence/aot/replace_ops.py (+42)
@@ -11,6 +11,7 @@
 
 # pyre-unsafe
 
+import copy
 import math
 from operator import neg
 from typing import cast, Dict, Iterable, Sequence, Set, Tuple
@@ -1799,6 +1800,46 @@ def call_operator(
         )
 
 
+@register_cadence_pass(CadencePassAttribute(opt_level=0))
+class ReplaceToDimOrderCopyWithToCopyPass(ExportPass):
+    """
+    dim_order_ops::to_dim_order_copy is not supported, so this is an opt_level=0 pass.
+    If the dim order is sequential, we don't need the extra work with strides and
+    can just use to_copy.
+    """
+
+    def call_operator(
+        self,
+        op,
+        args: Tuple[Argument, ...],
+        kwargs: Dict[str, Argument],
+        meta: NodeMetadata,
+    ) -> ProxyValue:
+        if op != exir_ops.edge.dim_order_ops._to_dim_order_copy.default:
+            return super().call_operator(op, args, kwargs, meta)
+
+        # new kwargs with dim_order, and no memory_format for the new op
+        nkwargs = dict(copy.deepcopy(kwargs))  # orig kwargs are immutable
+
+        # pyre-ignore[16]: `None` has no attribute `to_tensor`.
+        assert kwargs.get("dim_order") == list(
+            range(args[0].to_tensor().dim())
+        ), "Only sequential dims supported"
+
+        # remove dim_order from kwargs
+        nkwargs.pop("dim_order", None)
+
+        # bring back memory format
+        nkwargs["memory_format"] = torch.contiguous_format
+
+        return super().call_operator(
+            exir_ops.edge.aten._to_copy.default,
+            args,
+            nkwargs,
+            meta,
+        )
+
+
 @register_cadence_pass(CadencePassAttribute(opt_level=0))
 class ReplaceFullLikeWithFullPass(ExportPass):
     """
@@ -2108,4 +2149,5 @@ class CadenceReplaceOpsInGraph:
         ReplaceSingleElementTensorArgumentsFromFullOpWithScalarPass,
         ReplaceAtenAvgPoolWithJarvisAvgPoolPass,
         ReplaceAtenLinalgVectorNormWithCadenceLinalgVectorNormPass,
+        ReplaceToDimOrderCopyWithToCopyPass,
     ]
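To see why the substitution is safe, a rough eager-mode illustration (plain PyTorch, not the pass itself): a sequential dim order describes the standard row-major layout, which is exactly what `_to_copy` with `torch.contiguous_format` produces, so no stride bookkeeping is lost:

import torch

x = torch.randn(2, 3, 4).permute(2, 0, 1)  # non-contiguous input view

# the op the pass substitutes in place of to_dim_order_copy
y = torch.ops.aten._to_copy(x, memory_format=torch.contiguous_format)

# a contiguous copy carries the row-major strides implied by the
# sequential dim order [0, 1, ..., n-1], with values unchanged
assert y.is_contiguous()
assert y.stride() == torch.empty(y.shape).stride()
assert torch.equal(x, y)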

extension/flat_tensor/serialize/flat_tensor_schema.py (+1)
@@ -14,6 +14,7 @@
 # Note: check executorch/extension/data_format/flat_tensor.fbs for explanations of these fields.
 
 
+
 @dataclass
 class TensorMetadata:
     fully_qualified_name: str
