77 */
88
99#include < executorch/backends/qualcomm/aot/wrappers/TensorWrapper.h>
10+ #include < executorch/backends/qualcomm/qc_compiler_spec_generated.h>
1011#include < executorch/backends/qualcomm/runtime/QnnExecuTorchBackend.h>
1112#include < executorch/backends/qualcomm/runtime/QnnManager.h>
12- #include < executorch/backends/qualcomm/schema_generated.h>
1313
1414namespace executorch {
1515namespace backends {
1616namespace qnn {
1717
// CRC32 hasher producing the standard IEEE 802.3 (zlib-compatible) CRC-32.
//
// NOTE(review): the previous version seeded the reflected (right-shift)
// table-generation loop with the *forward* polynomial 0x04C11DB7. The
// reflected algorithm requires the bit-reversed polynomial 0xEDB88320;
// with the forward constant the output is a valid hash but NOT CRC-32.
// Fixed here so the result matches the standard check value
// (CRC32("123456789") == 0xCBF43926).
class CRC32 {
 public:
  CRC32() {
    // Bit-reversed form of the IEEE 802.3 polynomial 0x04C11DB7, as used
    // by the reflected (LSB-first) table algorithm.
    const uint32_t reflected_poly = 0xEDB88320u;
    lookup_table_.reserve(256);
    for (uint32_t i = 0; i < 256; ++i) {
      uint32_t entry = i;
      for (size_t j = 0; j < 8; ++j) {
        entry = (entry & 1) ? (reflected_poly ^ (entry >> 1)) : (entry >> 1);
      }
      lookup_table_.push_back(entry);
    }
  }

  // Returns the CRC-32 of buf[0, length). Uses the conventional all-ones
  // initial value and final XOR, so an empty buffer hashes to 0.
  uint32_t hash(const uint8_t* buf, uint32_t length) const {
    uint32_t val = 0xFFFFFFFF;
    for (size_t i = 0; i < length; ++i) {
      val = lookup_table_[(val ^ buf[i]) & 0xFF] ^ (val >> 8);
    }
    return val ^ 0xFFFFFFFF;
  }

 private:
  std::vector<uint32_t> lookup_table_;
};
41-
4218using namespace qnn_delegate ;
4319using executorch::runtime::ArrayRef;
4420using executorch::runtime::BackendExecutionContext;
@@ -56,24 +32,6 @@ Result<DelegateHandle*> QnnExecuTorchBackend::init(
5632 BackendInitContext& context,
5733 FreeableBuffer* processed,
5834 ArrayRef<CompileSpec> compile_specs) const {
59- // record the method name to be executed
60- // method_name_ = context.get_method_name();
61-
62- // TODO: this is a temporal solution for multi-graph support, will be
63- // removed once framework starts to accept runtime configuration
64- // ---
65- // check if current context binary has already been initialized
66- // return cached one for reducing memory footprint
67- uint32_t hash_val = CRC32 ().hash (
68- static_cast <const uint8_t *>(processed->data ()), processed->size ());
69- auto iter = delegate_map_.find (hash_val);
70- if (iter != delegate_map_.end ()) {
71- QNN_EXECUTORCH_LOG_INFO (
72- " Use cached delegate handle for current method: %s" ,
73- method_name_.c_str ());
74- return iter->second ;
75- }
76-
7735 // covert SizedBuffer to qnn ExecuTorch option
7836 QnnExecuTorchContextBinary qnn_context_blob;
7937 const qnn_delegate::QnnExecuTorchOptions* qnn_executorch_options = nullptr ;
@@ -99,6 +57,20 @@ Result<DelegateHandle*> QnnExecuTorchBackend::init(
9957 // destructible, we must call the destructor manually in destroy().
10058 new (qnn_manager) QnnManager (qnn_executorch_options, qnn_context_blob);
10159
60+ // TODO: this is a temporal solution for multi-graph support, will be
61+ // removed once framework starts to accept runtime configuration
62+ // ---
63+ // check if current context binary has already been initialized
64+ // return cached one for reducing memory footprint
65+ std::string binary_hash = qnn_manager->GetBinaryHash ();
66+ auto iter = delegate_map_.find (binary_hash);
67+ if (iter != delegate_map_.end ()) {
68+ QNN_EXECUTORCH_LOG_INFO (
69+ " Use cached delegate handle for current method: %s" ,
70+ context.get_method_name ());
71+ return iter->second ;
72+ }
73+
10274 ET_CHECK_OR_RETURN_ERROR (
10375 qnn_manager->Init () == Error::Ok,
10476 Internal,
@@ -117,7 +89,7 @@ Result<DelegateHandle*> QnnExecuTorchBackend::init(
11789 " Fail to allocate tensor" );
11890 }
11991 }
120- add_cached_delegate (hash_val , qnn_manager);
92+ add_cached_delegate (binary_hash , qnn_manager);
12193 return qnn_manager;
12294}
12395
@@ -131,10 +103,11 @@ Error QnnExecuTorchBackend::execute(
131103 " DelegateHandle has been deleted" );
132104 QnnManager* qnn_manager = static_cast <QnnManager*>(handle);
133105
106+ std::string method_name = context.get_method_name ();
134107 std::vector<std::shared_ptr<TensorWrapper>> input_tensors =
135- qnn_manager->GetGraphInputs (method_name_ );
108+ qnn_manager->GetGraphInputs (method_name );
136109 std::vector<std::shared_ptr<TensorWrapper>> output_tensors =
137- qnn_manager->GetGraphOutputs (method_name_ );
110+ qnn_manager->GetGraphOutputs (method_name );
138111 std::vector<Qnn_Tensor_t> input_tensor_structs;
139112 std::vector<Qnn_Tensor_t> output_tensor_structs;
140113
@@ -167,14 +140,14 @@ Error QnnExecuTorchBackend::execute(
167140
168141 ET_CHECK_OR_RETURN_ERROR (
169142 qnn_manager->Execute (
170- method_name_ ,
143+ method_name ,
171144 input_tensor_structs,
172145 output_tensor_structs,
173146 context.event_tracer ()) == Error::Ok,
174147 Internal,
175148 " Fail to execute graph" );
176149 ET_CHECK_OR_RETURN_ERROR (
177- qnn_manager->ProfileExecuteData (method_name_ , context.event_tracer ()) ==
150+ qnn_manager->ProfileExecuteData (method_name , context.event_tracer ()) ==
178151 Error::Ok,
179152 Internal,
180153 " Fail to profile graph" );
@@ -195,27 +168,24 @@ bool QnnExecuTorchBackend::is_available() const {
195168}
196169
197170void QnnExecuTorchBackend::add_cached_delegate (
198- uint32_t hash_val,
199- executorch::runtime::DelegateHandle* handle) {
171+ const std::string& hash_val,
172+ executorch::runtime::DelegateHandle* handle) const {
200173 std::lock_guard<std::mutex> guard (mutex_);
201174 delegate_map_[hash_val] = handle;
202175 delegate_map_rev_[handle] = hash_val;
203176}
204177
205178void QnnExecuTorchBackend::erase_cached_delegate (
206- executorch::runtime::DelegateHandle* handle) {
179+ executorch::runtime::DelegateHandle* handle) const {
207180 std::lock_guard<std::mutex> guard (mutex_);
208- uint32_t hash_val = delegate_map_rev_[handle];
209- delegate_map_.erase (hash_val);
181+ auto iter = delegate_map_rev_.find (handle);
182+ if (iter == delegate_map_rev_.end ()) {
183+ return ;
184+ }
185+ delegate_map_.erase (iter->second );
210186 delegate_map_rev_.erase (handle);
211187}
212188
213- std::mutex QnnExecuTorchBackend::mutex_;
214- std::unordered_map<uint32_t , executorch::runtime::DelegateHandle*>
215- QnnExecuTorchBackend::delegate_map_;
216- std::unordered_map<executorch::runtime::DelegateHandle*, uint32_t >
217- QnnExecuTorchBackend::delegate_map_rev_;
218-
219189namespace {
220190auto cls = QnnExecuTorchBackend();
221191executorch::runtime::Backend backend{" QnnBackend" , &cls};
0 commit comments