diff --git a/backends/mediatek/partitioner.py b/backends/mediatek/partitioner.py index 056336d606..b9e342e937 100644 --- a/backends/mediatek/partitioner.py +++ b/backends/mediatek/partitioner.py @@ -44,6 +44,11 @@ def is_node_supported(self, _, node: torch.fx.Node) -> bool: return False op_type = node.target.__name__ + + # Skip until we can handle the dimension order representation + if op_type == "aten._to_copy.default": + return False + if op_type in self._op_types_to_skip or node.name in self._op_names_to_skip: print( f"[Neuropilot Backend] The {op_type} operator with name '{node.name}' is skipped." diff --git a/backends/mediatek/preprocess.py b/backends/mediatek/preprocess.py index 92a1e196ad..3ec8c87e08 100644 --- a/backends/mediatek/preprocess.py +++ b/backends/mediatek/preprocess.py @@ -22,6 +22,23 @@ SKIP_COMPILE_SPEC_KEYS = {"ImportForever"} +def assert_default_dim_order(edge_graph_module: torch.fx.GraphModule) -> None: + for node in edge_graph_module.graph.nodes: + if node.op != "placeholder": + continue + + # We expect the default dim order for all tensor-like inputs i.e. inputs, buffers, and params + t = node.meta.get("val", None) + if t is not None and getattr(t, "dim_order", None) is not None: + default_dim_order = tuple(range(t.dim())) + if t.dim_order() != default_dim_order: + raise RuntimeError( + f"Neuropilot backend only supports contiguous memory format for inputs. " + f"Expecting dim_order: {default_dim_order}, but got " + f"{node.meta['val'].dim_order()} for a placeholder node {node}." 
+ ) + + @final class NeuropilotBackend(BackendDetails): @@ -30,6 +47,9 @@ def preprocess( cls, edge_program: ExportedProgram, module_compile_spec: List[CompileSpec] ) -> PreprocessResult: + # Make sure all inputs are contiguous_format or NCHW or default dim order + assert_default_dim_order(edge_program.graph_module) + name_to_node_mappings = {node.name: node for node in edge_program.graph.nodes} input_names = edge_program.graph_signature.user_inputs output_names = edge_program.graph_signature.user_outputs diff --git a/backends/mediatek/runtime/NeuronBackend.cpp b/backends/mediatek/runtime/NeuronBackend.cpp index 342534474d..fa3d0d9dc6 100644 --- a/backends/mediatek/runtime/NeuronBackend.cpp +++ b/backends/mediatek/runtime/NeuronBackend.cpp @@ -13,6 +13,7 @@ #include "api/NeuronAdapter.h" #include "executorch/runtime/core/error.h" +#include "executorch/runtime/core/exec_aten/util/dim_order_util.h" #include #include @@ -111,6 +112,14 @@ Error NeuronExecuTorchDelegate::execute( size_t inputCount = mInputSizes.size(), outputCount = mOutputSizes.size(); for (int i = 0; i < inputCount; i++) { + auto tensor_in = args[i]->toTensor(); + ET_CHECK_OR_RETURN_ERROR( + runtime::is_contiguous_dim_order( + tensor_in.dim_order().data(), tensor_in.dim()), + Internal, + "Expecting default dim_order but got a non default dim_order tensor for external input %d", + i); + auto data_ptr = args[i]->toTensor().data_ptr(); auto data_size = args[i]->toTensor().nbytes(); if (IsCached(i, data_ptr)) {