6 | 6 |  * LICENSE file in the root directory of this source tree.
7 | 7 |  */
8 | 8 |
9 |   | -#include <executorch/backends/qualcomm/aot/ir/qcir_utils.h>
10 | 9 | #include <executorch/backends/qualcomm/runtime/QnnManager.h>
11 | 10 | #include <executorch/backends/qualcomm/runtime/SharedBuffer.h>
12 | 11 | #include <executorch/backends/qualcomm/runtime/Utils.h>
@@ -572,126 +571,6 @@ Error QnnManager::CompileDlc() {

572 | 571 |   return Error::Ok;
573 | 572 | }
574 | 573 |
575 |     | -Error QnnManager::CompileQcir() {
576 |     | -  QnnQcirCustomProtocol qnn_qcir_custom_protocol;
577 |     | -  auto [status, qcir_fbs_size, tensor_size, qcir_fbs_ptr, tensor_ptr] =
578 |     | -      qnn_qcir_custom_protocol.DeserializeQcirCustomBuffer(
579 |     | -          qnn_context_blob_.buffer);
580 |     | -
581 |     | -  if (status != Error::Ok) {
582 |     | -    QNN_EXECUTORCH_LOG_ERROR("Failed to verify QnnQcirCustomProtocol");
583 |     | -    return Error::Internal;
584 |     | -  }
585 |     | -
586 |     | -  auto context = qcir::GetContext(qcir_fbs_ptr);
587 |     | -  for (const auto& graph : *context->graphs()) {
588 |     | -    // qcir tensors to TensorWrapper
589 |     | -    std::vector<std::shared_ptr<TensorWrapper>> graph_inputs, graph_outputs,
590 |     | -        tensors;
591 |     | -    for (const auto& tensor : *graph->tensors()) {
592 |     | -      tensors.emplace_back(CreateTensorWrapper(ToTensor(
593 |     | -          tensor, static_cast<uint8_t*>(tensor_ptr) + tensor->offset())));
594 |     | -      if (tensor->type() == qcir::TensorType::WRITE) {
595 |     | -        graph_inputs.push_back(tensors.back());
596 |     | -      } else if (tensor->type() == qcir::TensorType::READ) {
597 |     | -        graph_outputs.push_back(tensors.back());
598 |     | -      }
599 |     | -    }
600 |     | -    std::vector<std::shared_ptr<OpWrapper>> op_wrappers;
601 |     | -    // qcir graph node to OpWrapper
602 |     | -    for (const auto& node : *graph->nodes()) {
603 |     | -      std::shared_ptr<OpWrapper> op = std::make_shared<OpWrapper>(
604 |     | -          node->name()->str(),
605 |     | -          node->package_name()->str(),
606 |     | -          node->type_name()->str());
607 |     | -
608 |     | -      // qcir input tensors to OpWrapper input tensors
609 |     | -      std::vector<std::shared_ptr<TensorWrapper>> inputs;
610 |     | -      for (uint32_t index : *node->inputs()) {
611 |     | -        inputs.push_back(tensors[index]);
612 |     | -      }
613 |     | -      op->AddInputTensors(inputs);
614 |     | -
615 |     | -      // qcir output tensors to OpWrapper output tensors
616 |     | -      std::vector<std::shared_ptr<TensorWrapper>> outputs;
617 |     | -      for (uint32_t index : *node->outputs()) {
618 |     | -        outputs.push_back(tensors[index]);
619 |     | -      }
620 |     | -      op->AddOutputTensors(outputs);
621 |     | -
622 |     | -      // qcir operator param to OpWrapper param
623 |     | -      for (uint32_t index : *node->params()) {
624 |     | -        const auto& tensor = graph->tensors()->Get(index);
625 |     | -        std::string name = tensor->name()->str();
626 |     | -        Qnn_DataType_t dtype = ToDataType(tensor->dtype());
627 |     | -        const uint8_t* data_ptr =
628 |     | -            static_cast<uint8_t*>(tensor_ptr) + tensor->offset();
629 |     | -        if (tensor->shape()->size() != 0) {
630 |     | -          // add tensor param
631 |     | -          op->AddTensorParam(
632 |     | -              name,
633 |     | -              dtype,
634 |     | -              tensor->shape()->size(),
635 |     | -              tensor->shape()->data(),
636 |     | -              data_ptr);
637 |     | -        } else {
638 |     | -          // add scalar param
639 |     | -          switch (dtype) {
640 |     | -            case Qnn_DataType_t::QNN_DATATYPE_INT_32:
641 |     | -              op->AddScalarParam(
642 |     | -                  name, dtype, *reinterpret_cast<const int32_t*>(data_ptr));
643 |     | -              break;
644 |     | -            case Qnn_DataType_t::QNN_DATATYPE_INT_16:
645 |     | -              op->AddScalarParam(
646 |     | -                  name, dtype, *reinterpret_cast<const int16_t*>(data_ptr));
647 |     | -              break;
648 |     | -            case Qnn_DataType_t::QNN_DATATYPE_INT_8:
649 |     | -              op->AddScalarParam(name, dtype, static_cast<int8_t>(*data_ptr));
650 |     | -              break;
651 |     | -            case Qnn_DataType_t::QNN_DATATYPE_UINT_32:
652 |     | -              op->AddScalarParam(
653 |     | -                  name, dtype, *reinterpret_cast<const uint32_t*>(data_ptr));
654 |     | -              break;
655 |     | -            case Qnn_DataType_t::QNN_DATATYPE_UINT_16:
656 |     | -              op->AddScalarParam(
657 |     | -                  name, dtype, *reinterpret_cast<const uint16_t*>(data_ptr));
658 |     | -              break;
659 |     | -            case Qnn_DataType_t::QNN_DATATYPE_UINT_8:
660 |     | -              op->AddScalarParam(name, dtype, *data_ptr);
661 |     | -              break;
662 |     | -            case Qnn_DataType_t::QNN_DATATYPE_FLOAT_32:
663 |     | -            case Qnn_DataType_t::QNN_DATATYPE_FLOAT_16:
664 |     | -              op->AddScalarParam(
665 |     | -                  name, dtype, *reinterpret_cast<const float*>(data_ptr));
666 |     | -              break;
667 |     | -            case Qnn_DataType_t::QNN_DATATYPE_BOOL_8:
668 |     | -              op->AddScalarParam(name, dtype, *data_ptr);
669 |     | -              break;
670 |     | -            default:
671 |     | -              QNN_EXECUTORCH_LOG_ERROR(
672 |     | -                  "Invalid scalar type: %s", tensor->name()->c_str());
673 |     | -              break;
674 |     | -          }
675 |     | -        }
676 |     | -      }
677 |     | -      op_wrappers.emplace_back(std::move(op));
678 |     | -    }
679 |     | -    ET_CHECK_OR_RETURN_ERROR(
680 |     | -        Compile(graph->name()->str(), op_wrappers) == Error::Ok,
681 |     | -        Internal,
682 |     | -        "Fail to compile graph from qcir with graph_name: %s",
683 |     | -        graph->name()->str().c_str());
684 |     | -    ET_CHECK_OR_RETURN_ERROR(
685 |     | -        AllocateTensor(graph->name()->str(), graph_inputs, graph_outputs) ==
686 |     | -            Error::Ok,
687 |     | -        Internal,
688 |     | -        "Fail to allocate tensor for qcir with graph_name: %s",
689 |     | -        graph->name()->str().c_str());
690 |     | -  }
691 |     | -
692 |     | -  return Error::Ok;
693 |     | -}
694 |     | -
695 | 574 | Error QnnManager::Compile(
696 | 575 |     const std::string& graph_name,
697 | 576 |     std::vector<std::shared_ptr<OpWrapper>>& op_wrappers) {