
Commit 6ce0748

mcremon-meta authored and facebook-github-bot committed
Update name from xtensa to cadence
Summary: As titled. Differential Revision: D55998135
1 parent: 2fc99b0

32 files changed: +35 -35 lines
File renamed without changes.
File renamed without changes.

examples/xtensa/aot/export_example.py renamed to examples/cadence/aot/export_example.py

Lines changed: 6 additions & 6 deletions
@@ -28,7 +28,7 @@
 logging.basicConfig(level=logging.INFO, format=FORMAT)


-def export_xtensa_model(model, example_inputs):
+def export_model(model, example_inputs):
     # Quantizer
     quantizer = XtensaBaseQuantizer()

@@ -46,20 +46,20 @@ def export_xtensa_model(model, example_inputs):
     patterns = [q.pattern for q in quantizer.quantizers]
     QuantFusion(patterns)(converted_model)

-    # Get edge program (note: the name will change to export_to_xtensa in future PRs)
+    # Get edge program (note: the name will change to export_to_cadence in future PRs)
     edge_prog_manager = export_to_edge(converted_model, example_inputs, pt2_quant=True)

     # Run a couple required passes for quant/dequant ops
-    xtensa_prog_manager = edge_prog_manager.transform(
+    cadence_prog_manager = edge_prog_manager.transform(
         [ReplacePT2QuantWithXtensaQuant(), ReplacePT2DequantWithXtensaDequant()],
         check_ir_validity=False,
     )

-    exec_prog = xtensa_prog_manager.to_executorch()
+    exec_prog = cadence_prog_manager.to_executorch()

     logging.info(
         f"Final exported graph module:\n{exec_prog.exported_program().graph_module}"
     )

-    # Save the program as XtensaDemoModel.pte
-    save_pte_program(exec_prog, "XtensaDemoModel")
+    # Save the program as CadenceDemoModel.pte
+    save_pte_program(exec_prog, "CadenceDemoModel")
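
Below is a minimal usage sketch of the renamed entry point, assuming the script keeps its module path under the new examples/cadence directory; the toy model and the __main__ wrapper are illustrative only and not part of this commit.

import torch

# Assumed import path after the rename (examples/xtensa -> examples/cadence).
from executorch.examples.cadence.aot.export_example import export_model


class SmallLinear(torch.nn.Module):
    # Toy module used only to exercise the export flow.
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(16, 8)

    def forward(self, x):
        return self.linear(x)


if __name__ == "__main__":
    model = SmallLinear().eval()
    example_inputs = (torch.randn(1, 16),)
    # Quantizes the model, lowers it to edge, runs the quant/dequant
    # replacement passes, and saves CadenceDemoModel.pte.
    export_model(model, example_inputs)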
File renamed without changes.

examples/xtensa/aot/quantizer.py renamed to examples/cadence/aot/quantizer.py

Lines changed: 11 additions & 11 deletions
@@ -437,7 +437,7 @@ def get_anchors(
         )

     def replacement_op(self):
-        return torch.ops.xtensa.quantized_linear.default
+        return torch.ops.cadence.quantized_linear.default


 class LinearFunctionalPattern(QuantizationPattern):
@@ -457,7 +457,7 @@ def get_anchors(
         )

     def replacement_op(self):
-        return torch.ops.xtensa.quantized_linear.default
+        return torch.ops.cadence.quantized_linear.default


 class LayerNormPattern(QuantizationPattern):
@@ -476,7 +476,7 @@ def get_anchors(self, gm, fused_partition) -> PartitionAnchors:
         )

     def replacement_op(self):
-        return torch.ops.xtensa.quantized_layer_norm.default
+        return torch.ops.cadence.quantized_layer_norm.default


 class Conv1dPattern(QuantizationPattern):
@@ -503,7 +503,7 @@ def get_anchors(
         )

     def replacement_op(self):
-        return torch.ops.xtensa.quantized_conv.default
+        return torch.ops.cadence.quantized_conv.default


 class Conv2dPattern(QuantizationPattern):
@@ -530,7 +530,7 @@ def get_anchors(
         )

     def replacement_op(self):
-        return torch.ops.xtensa.quantized_conv.default
+        return torch.ops.cadence.quantized_conv.default


 class AddmmPattern(QuantizationPattern):
@@ -550,7 +550,7 @@ def get_anchors(
         )

     def replacement_op(self):
-        return torch.ops.xtensa.quantized_linear.default
+        return torch.ops.cadence.quantized_linear.default


 class ReluPattern(QuantizationPattern):
@@ -573,7 +573,7 @@ def get_anchors(
         )

     def replacement_op(self):
-        return torch.ops.xtensa.quantized_relu.default
+        return torch.ops.cadence.quantized_relu.default


 class GenericQuantizer(Quantizer):
@@ -823,15 +823,15 @@ def mark_fused(cls, nodes) -> bool:

 class ReplacePT2QuantWithXtensaQuant(ExportPass):
     """
-    Replace the pt2 quantization ops with custom xtensa quantization ops.
+    Replace the pt2 quantization ops with custom cadence quantization ops.
     """

     def call_operator(self, op, args, kwargs, meta):
         if op not in {exir_ops.edge.quantized_decomposed.quantize_per_tensor.default}:
             return super().call_operator(op, args, kwargs, meta)

         return super().call_operator(
-            exir_ops.edge.xtensa.quantize_per_tensor.default,
+            exir_ops.edge.cadence.quantize_per_tensor.default,
             args,
             kwargs,
             meta,
@@ -840,15 +840,15 @@ def call_operator(self, op, args, kwargs, meta):

 class ReplacePT2DequantWithXtensaDequant(ExportPass):
     """
-    Replace the pt2 dequantization ops with custom xtensa dequantization ops.
+    Replace the pt2 dequantization ops with custom cadence dequantization ops.
     """

     def call_operator(self, op, args, kwargs, meta):
         if op not in {exir_ops.edge.quantized_decomposed.dequantize_per_tensor.default}:
             return super().call_operator(op, args, kwargs, meta)

         return super().call_operator(
-            exir_ops.edge.xtensa.dequantize_per_tensor.default,
+            exir_ops.edge.cadence.dequantize_per_tensor.default,
             args,
             kwargs,
             meta,
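
Both passes above follow the same ExportPass shape: intercept one edge op in call_operator and re-emit it with the custom replacement target while keeping args, kwargs, and metadata. A standalone sketch of that pattern, assuming the custom cadence ops are already registered so the exir_ops.edge.cadence namespace resolves; the class name here is illustrative and not part of this commit.

from executorch.exir.dialects._ops import ops as exir_ops
from executorch.exir.pass_base import ExportPass


class ReplaceQuantOpSketch(ExportPass):
    # Illustrative pass mirroring ReplacePT2QuantWithXtensaQuant: swap the
    # pt2 quantize op for the custom cadence quantize op.
    def call_operator(self, op, args, kwargs, meta):
        if op not in {exir_ops.edge.quantized_decomposed.quantize_per_tensor.default}:
            # Any other op passes through unchanged.
            return super().call_operator(op, args, kwargs, meta)
        # Re-emit the node with the replacement target, preserving args/kwargs/meta.
        return super().call_operator(
            exir_ops.edge.cadence.quantize_per_tensor.default,
            args,
            kwargs,
            meta,
        )

Such a pass is applied through edge_prog_manager.transform([...]), as shown in the export_example.py diff above.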
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
