@@ -6,9 +6,9 @@
  * LICENSE file in the root directory of this source tree.
  */
 #include <executorch/backends/qualcomm/runtime/QnnManager.h>
+#include <executorch/backends/qualcomm/runtime/SharedBuffer.h>
 #include <executorch/backends/qualcomm/runtime/Utils.h>
 #include <executorch/backends/qualcomm/runtime/backends/QnnImplementation.h>
-
 #include <cstdlib>
 #include <cstring>
 #include <fstream>
@@ -54,7 +54,9 @@ QnnManager::QnnManager(
         "the size of qnn context binary: %d",
         qnn_executorch_context_binary.nbytes);
     QNN_EXECUTORCH_LOG_INFO(
-        "Is on-device graph construction: %d", options_->online_prepare());
+        "Is on-device graph construction: %d", options->online_prepare());
+    QNN_EXECUTORCH_LOG_INFO(
+        "Enable shared buffer: %d", options->shared_buffer());
   }
 
   if (library_path.empty()) {
@@ -82,6 +84,53 @@ Error QnnManager::LoadQnnLibrary() {
   return ret;
 }
 
+Error QnnManager::RegisterMem(
+    void* data_ptr,
+    const std::shared_ptr<TensorWrapper>& tensor_wrapper) {
+  SharedBuffer& shared_buffer_manager = SharedBuffer::GetSharedBufferManager();
+  // Shared buffer is not enabled.
+  if (!options_->shared_buffer())
+    return Error::Internal;
+
+  if (backend_params_ptr_->qnn_mem_manager_ptr_ == nullptr) {
+    QNN_EXECUTORCH_LOG_WARN(
+        "Backend %s doesn't support shared buffer.",
+        EnumNameQnnExecuTorchBackendType(
+            options_->backend_options()->backend_type()));
+    return Error::Internal;
+  }
+
+  if (!shared_buffer_manager.IsAllocated(data_ptr)) {
+    // This covers two scenarios:
+    // 1. the tensor is an input or output of a partitioned graph
+    // 2. the user didn't allocate the buffer with the
+    //    QnnExecuTorchAllocCustomMem API
+    return Error::Internal;
+  } else if (backend_params_ptr_->qnn_mem_manager_ptr_->IsRegistered(
+                 tensor_wrapper->GetMemHandle())) {
+    if (options_->log_level() >= QnnExecuTorchLogLevel::kLogLevelInfo)
+      QNN_EXECUTORCH_LOG_INFO(
+          "Tensor %s has already been registered to shared memory.",
+          tensor_wrapper->GetName().c_str());
+    return Error::Ok;
+  }
+
+  int32_t mem_fd = shared_buffer_manager.MemToFd(data_ptr);
+  if (mem_fd == -1) {
+    QNN_EXECUTORCH_LOG_WARN(
+        "Failed to get a file descriptor for tensor %s.",
+        tensor_wrapper->GetName().c_str());
+    return Error::Internal;
+  }
+  ET_CHECK_OR_RETURN_ERROR(
+      backend_params_ptr_->qnn_mem_manager_ptr_->RegisterMem(
+          tensor_wrapper, mem_fd) == Error::Ok,
+      Internal,
+      "Failed to register shared memory.");
+
+  return Error::Ok;
+}
+
 Error QnnManager::Init() {
   ET_CHECK_OR_RETURN_ERROR(
       LoadQnnLibrary() == Error::Ok, Internal, "Fail to load Qnn library");
@@ -219,14 +268,6 @@ void QnnManager::Destroy() {
   qnn_loaded_backend_.TerminateAllBackends();
 }
 
-bool QnnManager::IsAvailable() {
-  return true;
-}
-
-bool QnnManager::IsOnlinePrepare() {
-  return options_->online_prepare();
-}
-
 bool QnnManager::IsNodeSupportedByBackend(
     std::vector<std::shared_ptr<OpWrapper>>& op_wrappers) {
   Qnn_ErrorHandle_t error = QNN_SUCCESS;
@@ -329,3 +370,14 @@ Error QnnManager::Compile(
 } // namespace qnn
 } // namespace executor
 } // namespace torch
+void* QnnExecuTorchAllocCustomMem(size_t bytes, size_t alignment) {
+  using torch::executor::qnn::SharedBuffer;
+  void* buffer_ptr =
+      SharedBuffer::GetSharedBufferManager().AllocMem(bytes, alignment);
+  return buffer_ptr;
+}
+
+void QnnExecuTorchFreeCustomMem(void* buffer_ptr) {
+  using torch::executor::qnn::SharedBuffer;
+  SharedBuffer::GetSharedBufferManager().FreeMem(buffer_ptr);
+}
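
Below is a minimal usage sketch (C++, no diff markers) of the two public entry points this diff appends after the closing namespace braces. Only QnnExecuTorchAllocCustomMem and QnnExecuTorchFreeCustomMem, with the signatures shown above, come from the diff; the forward declarations, buffer size, 64-byte alignment, and the tensor-binding step are illustrative assumptions.

#include <cstddef>
using std::size_t;

// Assumed forward declarations; in practice these would come from the
// Qualcomm backend's public header.
void* QnnExecuTorchAllocCustomMem(size_t bytes, size_t alignment);
void QnnExecuTorchFreeCustomMem(void* buffer_ptr);

int main() {
  constexpr size_t kBytes = 224 * 224 * 3 * sizeof(float); // example I/O size
  constexpr size_t kAlignment = 64; // assumed alignment requirement

  // Allocate through the SharedBuffer manager so QnnManager::RegisterMem
  // can later map this pointer to a QNN memory handle; the IsAllocated()
  // check in the hunk above rejects pointers from any other allocator.
  void* io_buffer = QnnExecuTorchAllocCustomMem(kBytes, kAlignment);
  if (io_buffer == nullptr) {
    return 1; // allocation failed; fall back to regular heap memory
  }

  // ... bind io_buffer to a model input/output tensor and run inference ...

  QnnExecuTorchFreeCustomMem(io_buffer);
  return 0;
}

The design consequence visible in RegisterMem is that shared-buffer registration is strictly opt-in: pointers not obtained from QnnExecuTorchAllocCustomMem, or backends without a qnn_mem_manager_ptr_, yield Error::Internal, presumably leaving the caller on the ordinary copy-based I/O path.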