Arm backend: Don't handle const placeholders in match_arg_ranks_pass #9134

Merged: 1 commit, Mar 17, 2025
49 changes: 13 additions & 36 deletions backends/arm/_passes/match_arg_ranks_pass.py
@@ -1,6 +1,6 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Copyright 2024 Arm Limited and/or its affiliates.
# All rights reserved.
# Copyright 2024-2025 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
@@ -23,7 +23,17 @@
class MatchArgRanksPass(ExportPass):
"""
For ops in 'targeted_ops', make sure that the inputs share the same rank.
New dimensions are inserted at from the beginning of the
New dimensions are inserted from the beginning of the inputs that have a
lower rank to match the input with the highest rank.

Example:
input0 = shape(4, 3, 2)
input1 = shape(2)
input2 = shape(3, 1)
Becomes:
input0 = shape(4, 3, 2)
input1 = shape(1, 1, 2)
input2 = shape(1, 3, 1)
"""

def __init__(self, exported_program):
@@ -54,34 +64,6 @@ def _match_op_rank(self, graph_module, node, arg, max_rank):
)
node.replace_input_with(arg, view)

def _match_buffer_rank(self, arg, max_rank):
"""
Change arg's fake tensor meta to match max_rank if:
- arg is found in inputs_to_buffers or inputs_to_parameters.
"""
fake_tensor = get_first_fake_tensor(arg)
shape = fake_tensor.shape
rank = len(shape)
new_shape = list([1] * (max_rank - rank) + list(shape))

buffer_name = None
if arg.name in self.exported_program.graph_signature.inputs_to_buffers:
buffer_name = self.exported_program.graph_signature.inputs_to_buffers[
arg.name
]
elif arg.name in self.exported_program.graph_signature.inputs_to_parameters:
buffer_name = self.exported_program.graph_signature.inputs_to_parameters[
arg.name
]
if buffer_name:
new_tensor = self.exported_program.state_dict[buffer_name].reshape(
new_shape
)
self.exported_program.state_dict[buffer_name] = new_tensor
arg.meta["val"] = fake_tensor.fake_mode.from_tensor(
new_tensor, static_shapes=True
)

def call(self, graph_module: GraphModule) -> PassResult:
for node in graph_module.graph.nodes:
node = cast(Node, node)
@@ -105,12 +87,7 @@ def call(self, graph_module: GraphModule) -> PassResult:
if rank == max_rank:
continue

# If the argument is call_function, match shape by inserting view node.
if arg.op == "call_function":
self._match_op_rank(graph_module, node, arg, max_rank)
else:
# If the argument is a buffer or parameter, adjust shape by changing the fake tensor meta.
self._match_buffer_rank(arg, max_rank)
self._match_op_rank(graph_module, node, arg, max_rank)

graph_module.recompile()
graph_module = super().call(graph_module).graph_module
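
As an aside, the rank-matching rule described by the new docstring example can be sketched in plain PyTorch; match_rank below is an illustrative helper, not part of the pass:

import torch

def match_rank(t: torch.Tensor, max_rank: int) -> torch.Tensor:
    # Prepend singleton dimensions until t has max_rank dimensions,
    # e.g. shape (2,) -> (1, 1, 2) when max_rank == 3.
    new_shape = [1] * (max_rank - t.dim()) + list(t.shape)
    return t.reshape(new_shape)

input0 = torch.randn(4, 3, 2)
input1 = torch.randn(2)
input2 = torch.randn(3, 1)

max_rank = max(t.dim() for t in (input0, input1, input2))
print(match_rank(input1, max_rank).shape)  # torch.Size([1, 1, 2])
print(match_rank(input2, max_rank).shape)  # torch.Size([1, 3, 1])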
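
And a rough torch.fx sketch of the "insert a view node" approach that, with this change, the pass applies to every lower-ranked argument, const placeholders such as buffers included, instead of reshaping the stored tensor in the state dict. The toy module, target shape, and node filtering are assumptions for illustration, not the ExecuTorch pass itself:

import torch
from torch import fx

class AddBias(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.register_buffer("bias", torch.ones(2))  # rank-1 constant

    def forward(self, x):  # x is rank 3
        return x + self.bias

gm = fx.symbolic_trace(AddBias())

# Give the rank-1 buffer input a leading-1 shape by inserting a reshape node
# in front of the consuming op, rather than mutating the buffer data itself.
for node in list(gm.graph.nodes):
    if node.op != "call_function":
        continue
    for arg in list(node.all_input_nodes):
        if arg.op == "get_attr":  # the const placeholder in this toy graph
            with gm.graph.inserting_before(node):
                view = gm.graph.call_function(torch.reshape, (arg, (1, 1, 2)))
            node.replace_input_with(arg, view)

gm.recompile()
print(gm(torch.randn(4, 3, 2)).shape)  # torch.Size([4, 3, 2])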