# Copyright 2025 Arm Limited and/or its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import torch
from executorch.backends.arm._passes import ArmPass
from executorch.exir.dialects._ops import ops as exir_ops

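# leaky_relu targets in the edge and ATen dialects that this pass rewrites.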
edge_ops = (exir_ops.edge.aten.leaky_relu.default,)
torch_ops = (torch.ops.aten.leaky_relu.default,)


def _get_leaky_relu_ops(op) -> tuple:
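    """Return the (clamp, full, mul, add) ops in the dialect matching ``op``."""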
    if op in edge_ops:
        return (
            exir_ops.edge.aten.clamp.default,
            exir_ops.edge.aten.full.default,
            exir_ops.edge.aten.mul.Tensor,
            exir_ops.edge.aten.add.Tensor,
        )
    elif op in torch_ops:
        return (
            torch.ops.aten.clamp.default,
            torch.ops.aten.full.default,
            torch.ops.aten.mul.Tensor,
            torch.ops.aten.add.Tensor,
        )
    else:
        raise RuntimeError(f"Can't get decomposition ops for op {op}")


class DecomposeLeakyReLUPass(ArmPass):
    """
    This pass decomposes Leaky ReLU into primitive operations:
    LeakyReLU(x, slope) = max(0, x) + slope * min(0, x)

    Example:
        %op1 = clamp(x, 0, None)     (equivalent to max(0, x))
        %op2 = clamp(x, None, 0)     (equivalent to min(0, x))
        %op3 = full(x.shape, slope)
        %op4 = mul(%op3, %op2)
        %op5 = add(%op1, %op4)
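
    For example, with x = -2.0 and the default slope of 0.01:
        max(0, -2.0) + 0.01 * min(0, -2.0) = 0.0 + 0.01 * (-2.0) = -0.02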
    """

    def call_operator(self, op, args, kwargs, meta):
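        # Leave any op other than leaky_relu untouched.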
        if op not in (edge_ops + torch_ops):
            return super().call_operator(op, args, kwargs, meta)

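        # The slope argument is optional; aten.leaky_relu defaults it to 0.01.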
        x = args[0]
        slope = args[1] if len(args) > 1 else 0.01
        dtype = x.node.meta["val"].dtype
        clamp, full, mul, add = _get_leaky_relu_ops(op)
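        # max(0, x): clamp x from below at 0.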
        op1 = super().call_operator(
            op=clamp, args=(x, 0, None), kwargs=kwargs, meta=meta
        )
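        # min(0, x): clamp x from above at 0.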
        op2 = super().call_operator(
            op=clamp, args=(x, None, 0), kwargs=kwargs, meta=meta
        )
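        # Constant tensor filled with the slope, matching the shape and dtype of x.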
        op3 = super().call_operator(
            op=full,
            args=(x.node.meta["val"].shape, slope),
            kwargs={"dtype": dtype},
            meta=meta,
        )
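        # slope * min(0, x), then add max(0, x) to complete the decomposition.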
        op4 = super().call_operator(op=mul, args=(op3, op2), kwargs=kwargs, meta=meta)
        op5 = super().call_operator(op=add, args=(op1, op4), kwargs=kwargs, meta=meta)
        return op5
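

# Minimal usage sketch (an assumption for illustration, not part of this patch;
# it presumes ArmPass follows the ExportPass interface, whose instances take a
# torch.fx.GraphModule and return a PassResult):
#
#     exported = torch.export.export(model, (example_input,))
#     result = DecomposeLeakyReLUPass()(exported.graph_module)
#     decomposed_graph = result.graph_module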