Skip to content

Commit 2de3fa6

Browse files
authored
Merge branch 'main' into where_cleanup
2 parents 0a2d2cf + 3c21e3a commit 2de3fa6

File tree

135 files changed

+7392
-7268
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

135 files changed

+7392
-7268
lines changed

.ci/docker/ci_commit_pins/buck2.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
2024-12-16
1+
2025-05-06

.github/workflows/apple.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,8 @@ on:
55
branches:
66
- main
77
- release/*
8+
tags:
9+
- ciflow/trunk/*
810
pull_request:
911
paths:
1012
- .ci/scripts/setup-ios.sh

CMakeLists.txt

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,14 @@
4444

4545
cmake_minimum_required(VERSION 3.24)
4646
project(executorch)
47+
48+
# MARK: - Start EXECUTORCH_H12025_BUILD_MIGRATION --------------------------------------------------
49+
50+
include(${PROJECT_SOURCE_DIR}/tools/cmake/common/preset.cmake)
51+
include(${PROJECT_SOURCE_DIR}/tools/cmake/preset/default.cmake)
52+
53+
# MARK: - End EXECUTORCH_H12025_BUILD_MIGRATION ----------------------------------------------------
54+
4755
include(tools/cmake/Utils.cmake)
4856
include(CMakeDependentOption)
4957

@@ -96,9 +104,6 @@ set(EXECUTORCH_PAL_DEFAULT
96104
"Which PAL default implementation to use: one of {posix, minimal}"
97105
)
98106

99-
option(EXECUTORCH_ENABLE_LOGGING "Build with ET_LOG_ENABLED"
100-
${_default_release_disabled_options}
101-
)
102107
if(NOT EXECUTORCH_ENABLE_LOGGING)
103108
# Avoid pulling in the logging strings, which can be large. Note that this
104109
# will set the compiler flag for all targets in this directory, and for all

backends/arm/_passes/annotate_decomposed_matmul.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,12 @@
11
# Copyright 2024-2025 Arm Limited and/or its affiliates.
2-
# All rights reserved.
32
#
43
# This source code is licensed under the BSD-style license found in the
54
# LICENSE file in the root directory of this source tree.
65

76
# pyre-unsafe
87

98
import itertools
10-
9+
import operator
1110
from typing import List
1211

1312
import torch
@@ -22,7 +21,7 @@
2221

2322
class AnnotateDecomposedMatmulPass(ExportPass):
2423
"""
25-
torch.matmul can be decomposed in many ways, for instance:
24+
torch.matmul and its equivalent operator @ can be decomposed in many ways, for instance:
2625
dq -> matmul -> q can become
2726
dq -> repeat -> view -> bmm -> view -> dq which makes quantization folding
2827
difficult. This helper function finds all matmul partitions and annotates its
@@ -50,6 +49,7 @@ def call(self, graph_module: GraphModule) -> PassResult:
5049
graph_module.graph,
5150
[
5251
torch.matmul,
52+
operator.matmul,
5353
],
5454
None,
5555
)

backends/arm/operator_support/pool_2d_support.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,8 +54,11 @@ def is_node_tosa_supported(self, node: fx.Node, tosa_spec: TosaSpecification):
5454
kernel = cast(tuple[int, int], node.args[1])
5555
stride = cast(tuple[int, int], node.args[2])
5656
if len(node.args) > 3:
57+
padding = cast(tuple[int, int], node.args[3])
5758
# Padding case
58-
if not all(1 <= k <= 8 for k in kernel):
59+
if not all(1 <= k <= 8 for k in kernel) and not all(
60+
v == 0 for v in padding
61+
):
5962
self.reporter.report_reject(
6063
node, f"Avgpool2d with padding needs kernel dims < 8, got {kernel}"
6164
)

backends/arm/operator_support/tosa_supported_operators.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -335,6 +335,7 @@ def _is_matmul_node_supported(
335335
graph_module.graph,
336336
[
337337
torch.matmul,
338+
operator.matmul,
338339
],
339340
None,
340341
)
@@ -385,7 +386,7 @@ def is_node_supported(
385386
):
386387
source_fn_stack: tuple[typing.Any] = node.meta.get("source_fn_stack", [])
387388
if len(source_fn_stack) > 0:
388-
if source_fn_stack[-1][1] in (torch.matmul,):
389+
if source_fn_stack[-1][1] in (torch.matmul, operator.matmul):
389390
return self._is_matmul_node_supported(submodules, node)
390391

391392
elif node.target in (exir_ops.edge.aten.max_pool2d_with_indices.default,):

backends/arm/operators/op_max_pool2d.py

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,24 @@
2323
from executorch.backends.arm.tosa_specification import TosaSpecification
2424

2525

26+
# Similarly to Conv2d, the TOSA spec requires that the following is exactly divisible:
27+
# `(input + 2 * pad - kernel_size) / stride`
28+
# PyTorch however, does not require this, so as needed, we must adjust the padding.
29+
def adjust_pad_if_needed(
30+
input_size: int, kernel_size: int, stride: int, pad: int
31+
) -> int:
32+
if pad == 0:
33+
return pad
34+
35+
mod_remainder = (input_size + 2 * pad - kernel_size) % stride
36+
37+
# No need to adjust
38+
if mod_remainder == 0:
39+
return pad
40+
41+
return pad - mod_remainder
42+
43+
2644
@register_node_visitor
2745
class MaxPool2dVisitor_0_80(NodeVisitor):
2846
target = "aten.max_pool2d.default"
@@ -61,6 +79,20 @@ def define_node(
6179
except IndexError:
6280
pad_size_list = [0, 0, 0, 0]
6381

82+
# Adjust the padding as necessary
83+
pad_size_list[1] = adjust_pad_if_needed(
84+
input_tensor.shape[2],
85+
kernel_size[0],
86+
stride[0],
87+
pad_size_list[1],
88+
)
89+
pad_size_list[3] = adjust_pad_if_needed(
90+
input_tensor.shape[3],
91+
kernel_size[1],
92+
stride[1],
93+
pad_size_list[3],
94+
)
95+
6496
accumulator_type = output.dtype
6597

6698
# Initialize zero point to zero.
@@ -131,6 +163,20 @@ def define_node(
131163
except IndexError:
132164
pad_size_list = [0, 0, 0, 0]
133165

166+
# Adjust the padding as necessary
167+
pad_size_list[1] = adjust_pad_if_needed(
168+
input_tensor.shape[2],
169+
kernel_size[0],
170+
stride[0],
171+
pad_size_list[1],
172+
)
173+
pad_size_list[3] = adjust_pad_if_needed(
174+
input_tensor.shape[3],
175+
kernel_size[1],
176+
stride[1],
177+
pad_size_list[3],
178+
)
179+
134180
attr = ts.TosaSerializerAttribute()
135181
attr.MaxPool2dAttribute(
136182
kernel=kernel_size, stride=stride, pad=pad_size_list, nan_mode=1

backends/arm/operators/op_slice.py

Lines changed: 20 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -68,8 +68,16 @@ def define_node(
6868
end_index = _fixup_end(end, shape, dim)
6969
size = end_index - start_index
7070

71-
assert size > 0
72-
assert size <= shape[dim]
71+
if size <= 0:
72+
raise ValueError(
73+
f"The calculated slice size must be positive. Got {size=} "
74+
f"with {start_index=} and {end_index=}."
75+
)
76+
if size > shape[dim]:
77+
raise ValueError(
78+
f"The calculated slice size cannot be greater than the dimension size"
79+
f". Got {size=} and {shape[dim]=}."
80+
)
7381

7482
# Convert aten args to Tosa's start and size attributes and in TOSA dim order.
7583
attr = ts.TosaSerializerAttribute()
@@ -122,8 +130,16 @@ def define_node(
122130
end_index = _fixup_end(end, shape, dim)
123131
size = end_index - start_index
124132

125-
assert size > 0
126-
assert size <= shape[dim]
133+
if size <= 0:
134+
raise ValueError(
135+
f"The calculated slice size must be positive. Got {size=} "
136+
f"with {start_index=} and {end_index=}."
137+
)
138+
if size > shape[dim]:
139+
raise ValueError(
140+
f"The calculated slice size cannot be greater than the dimension size"
141+
f". Got {size=} and {shape[dim]=}."
142+
)
127143

128144
# Convert aten args to Tosa's start and size shape_t tensors and in TOSA dim order.
129145
starts = [

backends/arm/scripts/parse_test_names.py

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,15 @@
55
from executorch.exir.dialects.edge.spec.utils import SAMPLE_INPUT
66

77
# Add edge ops which we lower but which are not included in exir/dialects/edge/edge.yaml here.
8-
CUSTOM_EDGE_OPS = ["linspace.default", "eye.default"]
8+
CUSTOM_EDGE_OPS = [
9+
"linspace.default",
10+
"eye.default",
11+
"hardsigmoid.default",
12+
"hardswish.default",
13+
"linear.default",
14+
"maximum.default",
15+
"adaptive_avg_pool2d.default",
16+
]
917
ALL_EDGE_OPS = SAMPLE_INPUT.keys() | CUSTOM_EDGE_OPS
1018

1119
# Add all targets and TOSA profiles we support here.

backends/arm/test/common.py

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -259,17 +259,15 @@ def decorator_func(func):
259259
raise RuntimeError(
260260
"xfail info needs to be str, or tuple[str, type[Exception]]"
261261
)
262-
pytest_param = pytest.param(
263-
test_parameters,
264-
id=id,
265-
marks=pytest.mark.xfail(
266-
reason=reason, raises=raises, strict=strict
267-
),
262+
# Set up our fail marker
263+
marker = (
264+
pytest.mark.xfail(reason=reason, raises=raises, strict=strict),
268265
)
269266
else:
270-
pytest_param = pytest.param(test_parameters, id=id)
271-
pytest_testsuite.append(pytest_param)
267+
marker = ()
272268

269+
pytest_param = pytest.param(test_parameters, id=id, marks=marker)
270+
pytest_testsuite.append(pytest_param)
273271
return pytest.mark.parametrize(arg_name, pytest_testsuite)(func)
274272

275273
return decorator_func

0 commit comments

Comments
 (0)