Commit f36af71
Qualcomm AI Engine Direct - oss model enablement (EfficientSAM)

- e2e script for https://github.com/yformer/EfficientSAM
- FastViT breakage fix
- Passes order correction
- Add support for cum_sum

1 parent e86c9c9
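The newly supported op can be exercised with a toy module; below is a minimal sketch (not the e2e script added by this commit) showing that torch.cumsum exports as aten.cumsum.default, the op the QNN backend now consumes:

import torch


class CumsumModule(torch.nn.Module):
    def forward(self, x):
        return torch.cumsum(x, dim=-1)


ep = torch.export.export(CumsumModule(), (torch.randn(1, 4, 8),))
print(ep.graph)  # the graph contains aten.cumsum.default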

File tree: 19 files changed, +752 −13 lines

backends/qualcomm/_passes/layout_transform.py

Lines changed: 1 addition & 0 deletions
@@ -52,6 +52,7 @@ class LayoutTransform(ExportPass):
         exir_ops.edge.aten.ceil.default,
         exir_ops.edge.aten.clamp.default,
         exir_ops.edge.aten.constant_pad_nd.default,
+        exir_ops.edge.aten.cumsum.default,
         exir_ops.edge.aten.div.Tensor,
         exir_ops.edge.aten.eq.Tensor,
         exir_ops.edge.aten.full.default,

backends/qualcomm/_passes/lift_constant_scalar_operands.py

Lines changed: 1 addition & 0 deletions
@@ -46,6 +46,7 @@ class TensorOpInfo:
     aten.pow.Tensor_Scalar: TensorOpInfo(aten.pow.Tensor_Tensor, False),
     # The scalar number arg[1] is missing when using default. Result in a corner case to deal
     aten.leaky_relu.default: TensorOpInfo(aten.prelu.default, True),
+    aten.where.ScalarOther: TensorOpInfo(aten.where.self, False),
 }
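The new entry maps the scalar-`other` variant of `where` onto its all-tensor overload so the scalar operand can be lifted to a constant tensor. A quick illustration of the two overloads (my example, not from the diff):

import torch

x = torch.randn(4)
cond = x > 0
# aten.where.ScalarOther: "other" is a Python scalar.
y_scalar = torch.where(cond, x, 0.0)
# aten.where.self: the scalar lifted to a tensor operand.
y_tensor = torch.where(cond, x, torch.tensor(0.0))
assert torch.equal(y_scalar, y_tensor)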

backends/qualcomm/_passes/recompose_pixel_unshuffle.py

Lines changed: 5 additions & 7 deletions
@@ -45,13 +45,11 @@ def call(self, graph_module: torch.fx.GraphModule):
                 continue

             view_node = premute_node.args[0]
-            if any(
-                [
-                    view_node.op != "call_function",
-                    view_node.target != self.view_target,
-                    len(view_node.args[1]) != 6,
-                    len(premute_node.args[1]) != 6,
-                ]
+            if (
+                view_node.op != "call_function"
+                or view_node.target != self.view_target
+                or len(view_node.args[1]) != 6
+                or len(premute_node.args[1]) != 6
             ):
                 continue
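The rewrite is not only stylistic: `any([...])` builds the whole list first, so every condition is evaluated even after one has already decided the outcome, while chained `or` short-circuits. A standalone illustration (hypothetical node stand-in, not from the diff):

args = ()  # stand-in for a node whose target takes no list argument

# Eager form: the IndexError fires while building the list, even though
# the first condition alone would have decided the outcome.
try:
    any([True, len(args[1]) != 6])
except IndexError:
    print("evaluated past the deciding condition")

# Short-circuit form: the risky check is never reached.
assert True or len(args[1]) != 6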

backends/qualcomm/_passes/utils.py

Lines changed: 1 addition & 1 deletion
@@ -93,7 +93,7 @@ def get_passes_dependency_for_capture_program():
         ConvertToLinear: [RecomposePixelUnshuffle],
         DecomposeAny: [RemoveRedundancy],
         DecomposeLinalgVectorNorm: [RemoveRedundancy],
-        ExpandBroadcastTensorShape: [RemoveRedundancy],
+        ExpandBroadcastTensorShape: [ConstantI64toI32, TensorI64toI32],
         FoldQDQ: [AnnotateQuantAttrs, AnnotateDecomposed],
         LayoutTransform: [
             AnnotateQuantAttrs,
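If, as the mapping above suggests, each key lists the passes that must run before it, an execution order can be derived topologically. A minimal sketch of that idea (pass names as strings, dependency values assumed for illustration; not the actual scheduler):

from graphlib import TopologicalSorter  # Python 3.9+

# Key -> passes that must run first (mirrors the dict shape above).
deps = {
    "TensorI64toI32": ["RemoveRedundancy"],
    "ConstantI64toI32": ["RemoveRedundancy"],
    "ExpandBroadcastTensorShape": ["ConstantI64toI32", "TensorI64toI32"],
}
print(list(TopologicalSorter(deps).static_order()))
# RemoveRedundancy comes first, ExpandBroadcastTensorShape last.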

backends/qualcomm/builders/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -19,6 +19,7 @@
     op_clamp,
     op_conv2d,
     op_cos,
+    op_cum_sum,
     op_depth_to_space,
     op_dequantize,
     op_div,
@@ -98,6 +99,7 @@
     op_clamp,
     op_conv2d,
     op_cos,
+    op_cum_sum,
     op_depth_to_space,
     op_dequantize,
     op_div,

backends/qualcomm/builders/op_cos.py

Lines changed: 0 additions & 1 deletion
@@ -3,7 +3,6 @@
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
-
 from typing import Dict

 import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper
backends/qualcomm/builders/op_cum_sum.py (new file)

Lines changed: 84 additions & 0 deletions
@@ -0,0 +1,84 @@
+# Copyright (c) Qualcomm Innovation Center, Inc.
+# All rights reserved
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+from typing import cast, Dict
+
+import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper
+
+import numpy as np
+import torch
+from executorch.backends.qualcomm.utils.constants import QCOM_AXIS_ORDER, QCOM_DATA
+
+from .node_visitor import NodeVisitor, register_node_visitor
+from .qnn_constants import OpCumulativeSum, QNN_OP_PACKAGE_NAME_QTI_AISW
+
+
+@register_node_visitor
+class CumulativeSum(NodeVisitor):
+    target = ["aten.cumsum.default"]
+
+    def __init__(self, *args) -> None:
+        super().__init__(*args)
+
+    def get_param(self, node, input_tensor):
+        dim = node.args[1]
+
+        if dim < 0:
+            dim = dim % len(input_tensor.shape)
+        if QCOM_AXIS_ORDER in node.meta:
+            dim = node.meta[QCOM_AXIS_ORDER].index(dim)
+
+        return cast(np.uint32, dim)
+
+    def define_node(
+        self,
+        node: torch.fx.Node,
+        nodes_to_wrappers: Dict[torch.fx.Node, PyQnnWrapper.TensorWrapper],
+    ) -> PyQnnWrapper.PyQnnOpWrapper:
+        input_node = node.args[0]
+        input_tensor = self.get_tensor(input_node, node)
+        input_tensor_wrapper = self.define_tensor(
+            input_node,
+            node,
+            input_tensor,
+            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
+            nodes_to_wrappers,
+        )
+
+        dim = self.get_param(node, input_tensor)
+
+        output_tensor = self.get_tensor(node, node)
+        output_tensor_wrapper = self.define_tensor(
+            node,
+            node,
+            output_tensor,
+            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
+            nodes_to_wrappers,
+        )
+
+        cumsum_op = PyQnnWrapper.PyQnnOpWrapper(
+            node.name,
+            QNN_OP_PACKAGE_NAME_QTI_AISW,
+            OpCumulativeSum.op_name,
+        )
+        cumsum_op.AddInputTensors([input_tensor_wrapper])
+        cumsum_op.AddOutputTensors([output_tensor_wrapper])
+        cumsum_op.AddScalarParam(
+            OpCumulativeSum.param_axis,
+            PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_UINT_32,
+            {QCOM_DATA: dim},
+        )
+        cumsum_op.AddScalarParam(
+            OpCumulativeSum.param_exclusive,
+            PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_BOOL_8,
+            {QCOM_DATA: False},
+        )
+        cumsum_op.AddScalarParam(
+            OpCumulativeSum.param_reverse,
+            PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_BOOL_8,
+            {QCOM_DATA: False},
+        )
+
+        return cumsum_op
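The `get_param` logic above normalizes a negative `dim` and then remaps it through the recorded axis order when the layout transform has permuted the tensor. A worked example, assuming QCOM_AXIS_ORDER holds an NCHW-to-NHWC permutation:

shape = (1, 3, 128, 128)          # NCHW input
dim = -3 % len(shape)             # negative dim normalizes to 1 (channels)
axis_order = (0, 2, 3, 1)         # assumed NCHW -> NHWC permutation
qnn_axis = axis_order.index(dim)  # channels sit at index 3 after permute
assert (dim, qnn_axis) == (1, 3)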

backends/qualcomm/builders/op_sin.py

Lines changed: 0 additions & 1 deletion
@@ -3,7 +3,6 @@
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
-
 from typing import Dict

 import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper

backends/qualcomm/builders/qnn_constants.py

Lines changed: 8 additions & 0 deletions
@@ -50,6 +50,14 @@ class OpConvert:
     op_name: str = "Convert"


+@dataclass(init=False, frozen=True)
+class OpCumulativeSum:
+    op_name = "CumulativeSum"
+    param_axis = "axis"
+    param_exclusive = "exclusive"
+    param_reverse = "reverse"
+
+
 @dataclass(init=False, frozen=True)
 class OpDepthToSpace:
     op_name: str = "DepthToSpace"
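The builder above pins `exclusive` and `reverse` to False, i.e. a plain forward-inclusive scan. For reference, what the three QNN CumulativeSum parameters mean, expressed in torch (my illustration, not from the diff):

import torch

x = torch.tensor([1.0, 2.0, 3.0])
inclusive = torch.cumsum(x, dim=0)                # [1., 3., 6.]  what the builder emits
exclusive = inclusive - x                         # [0., 1., 3.]  exclusive=True
reverse = torch.cumsum(x.flip(0), dim=0).flip(0)  # [6., 5., 3.]  reverse=True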

backends/qualcomm/quantizer/annotators.py

Lines changed: 5 additions & 0 deletions
@@ -920,6 +920,11 @@ def annotate_conv2d(node: Node, quantization_config: QuantizationConfig) -> None
     )


+@register_annotator([torch.ops.aten.cumsum.default])
+def annotate_cumsum(node: Node, quantization_config: QuantizationConfig) -> None:
+    annotate_single_in_single_out(node, quantization_config)
+
+
 @register_annotator([torch.ops.aten.linear.default])
 def annotate_linear(node: Node, quantization_config: QuantizationConfig) -> None:
     act_node = node.args[0]
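`annotate_single_in_single_out` is an existing helper in this file; the sketch below is a reconstruction of what such a helper typically records (QuantizationAnnotation comes from torch.ao's PT2E quantizer; the exact body and field names here are assumed, not taken from the diff):

from torch.ao.quantization.quantizer import QuantizationAnnotation


def annotate_single_in_single_out_sketch(node, quantization_config) -> None:
    # One activation qspec for the single input, one for the single output.
    node.meta["quantization_annotation"] = QuantizationAnnotation(
        input_qspec_map={node.args[0]: quantization_config.input_activation},
        output_qspec=quantization_config.output_activation,
        _annotated=True,
    )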
