diff --git a/frameworks/native/neural_network_runtime/device.h b/frameworks/native/neural_network_runtime/device.h index 274b9796e26d928404db56702ce2b500218490f4..015e9e5d0380eff8e1a4515c12eb538b6ab9039b 100644 --- a/frameworks/native/neural_network_runtime/device.h +++ b/frameworks/native/neural_network_runtime/device.h @@ -49,6 +49,7 @@ public: virtual OH_NN_ReturnCode IsModelCacheSupported(bool& isSupported) = 0; virtual OH_NN_ReturnCode PrepareModel(std::shared_ptr<const mindspore::lite::LiteGraph> model, + const Buffer& quantBuffer, const ModelConfig& config, std::shared_ptr<PreparedModel>& preparedModel) = 0; virtual OH_NN_ReturnCode PrepareModel(const void* metaGraph, @@ -72,4 +73,4 @@ public: }; } // namespace NeuralNetworkRuntime } // namespace OHOS -#endif // NEURAL_NETWORK_RUNTIME_DEVICE_H \ No newline at end of file +#endif // NEURAL_NETWORK_RUNTIME_DEVICE_H diff --git a/frameworks/native/neural_network_runtime/hdi_device_v1_0.cpp b/frameworks/native/neural_network_runtime/hdi_device_v1_0.cpp index ef69c9a13e9ee921de17b9627cfa5d857c3e72d6..87cd2eb9ef3be9efae984f80a895707ed8358971 100644 --- a/frameworks/native/neural_network_runtime/hdi_device_v1_0.cpp +++ b/frameworks/native/neural_network_runtime/hdi_device_v1_0.cpp @@ -238,7 +238,7 @@ OH_NN_ReturnCode HDIDeviceV1_0::IsModelCacheSupported(bool& isSupported) } OH_NN_ReturnCode HDIDeviceV1_0::PrepareModel(std::shared_ptr<const mindspore::lite::LiteGraph> model, - const ModelConfig& config, std::shared_ptr<PreparedModel>& preparedModel) + const Buffer& quantBuffer, const ModelConfig& config, std::shared_ptr<PreparedModel>& preparedModel) { if (model == nullptr) { LOGE("Model is nullptr, cannot prepare model."); diff --git a/frameworks/native/neural_network_runtime/hdi_device_v1_0.h b/frameworks/native/neural_network_runtime/hdi_device_v1_0.h index a7167ba177f7e1d9a7efb962557ae92f31962a19..fb0f2aeb41f0782ef1b0d7f36720c3642a53e16c 100644 --- a/frameworks/native/neural_network_runtime/hdi_device_v1_0.h +++ b/frameworks/native/neural_network_runtime/hdi_device_v1_0.h @@ -45,6 +45,7 @@ public: OH_NN_ReturnCode
IsModelCacheSupported(bool& isSupported) override; OH_NN_ReturnCode PrepareModel(std::shared_ptr<const mindspore::lite::LiteGraph> model, + const Buffer& quantBuffer, const ModelConfig& config, std::shared_ptr<PreparedModel>& preparedModel) override; OH_NN_ReturnCode PrepareModel(const void* metaGraph, @@ -76,4 +77,4 @@ private: }; } // namespace NeuralNetworkRuntime } // namespace OHOS -#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V1_0_H \ No newline at end of file +#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V1_0_H diff --git a/frameworks/native/neural_network_runtime/hdi_device_v2_0.cpp b/frameworks/native/neural_network_runtime/hdi_device_v2_0.cpp index 221bb6b0369f7c2b0af23cc96e6e9503a6a87918..09c92d0ef203c03faab799b7d8caca5603d83158 100644 --- a/frameworks/native/neural_network_runtime/hdi_device_v2_0.cpp +++ b/frameworks/native/neural_network_runtime/hdi_device_v2_0.cpp @@ -277,7 +277,7 @@ OH_NN_ReturnCode HDIDeviceV2_0::IsModelCacheSupported(bool& isSupported) } OH_NN_ReturnCode HDIDeviceV2_0::PrepareModel(std::shared_ptr<const mindspore::lite::LiteGraph> model, - const ModelConfig& config, std::shared_ptr<PreparedModel>& preparedModel) + const Buffer& quantBuffer, const ModelConfig& config, std::shared_ptr<PreparedModel>& preparedModel) { if (model == nullptr) { LOGE("Model is nullptr, cannot prepare model."); diff --git a/frameworks/native/neural_network_runtime/hdi_device_v2_0.h b/frameworks/native/neural_network_runtime/hdi_device_v2_0.h index b8dc5f8f0c58a9bd3204e6e0ee009c2ba93bb026..2c3aab5e0b7f940feb8fe8e2c82d3879e94687f2 100644 --- a/frameworks/native/neural_network_runtime/hdi_device_v2_0.h +++ b/frameworks/native/neural_network_runtime/hdi_device_v2_0.h @@ -45,6 +45,7 @@ public: OH_NN_ReturnCode IsModelCacheSupported(bool& isSupported) override; OH_NN_ReturnCode PrepareModel(std::shared_ptr<const mindspore::lite::LiteGraph> model, + const Buffer& quantBuffer, const ModelConfig& config, std::shared_ptr<PreparedModel>& preparedModel) override; OH_NN_ReturnCode PrepareModel(const void* metaGraph, diff --git a/frameworks/native/neural_network_runtime/hdi_device_v2_1.cpp
b/frameworks/native/neural_network_runtime/hdi_device_v2_1.cpp index 72b5cc4558ca4899827987ae22467a5e13a0ede4..9b9f0f67253f500fbaeaabcba3509f4d23d5691b 100644 --- a/frameworks/native/neural_network_runtime/hdi_device_v2_1.cpp +++ b/frameworks/native/neural_network_runtime/hdi_device_v2_1.cpp @@ -277,7 +277,7 @@ OH_NN_ReturnCode HDIDeviceV2_1::IsModelCacheSupported(bool& isSupported) } OH_NN_ReturnCode HDIDeviceV2_1::PrepareModel(std::shared_ptr<const mindspore::lite::LiteGraph> model, - const ModelConfig& config, std::shared_ptr<PreparedModel>& preparedModel) + const Buffer& quantBuffer, const ModelConfig& config, std::shared_ptr<PreparedModel>& preparedModel) { if (model == nullptr) { LOGE("Model is nullptr, cannot prepare model."); diff --git a/frameworks/native/neural_network_runtime/hdi_device_v2_1.h b/frameworks/native/neural_network_runtime/hdi_device_v2_1.h index a2c4910f3bf8b46b9d52b8c4c879f5affaf52e78..06016b9e334d89e62d79a3c0e96c177b90c6ee31 100644 --- a/frameworks/native/neural_network_runtime/hdi_device_v2_1.h +++ b/frameworks/native/neural_network_runtime/hdi_device_v2_1.h @@ -45,6 +45,7 @@ public: OH_NN_ReturnCode IsModelCacheSupported(bool& isSupported) override; OH_NN_ReturnCode PrepareModel(std::shared_ptr<const mindspore::lite::LiteGraph> model, + const Buffer& quantBuffer, const ModelConfig& config, std::shared_ptr<PreparedModel>& preparedModel) override; OH_NN_ReturnCode PrepareModel(const void* metaGraph, diff --git a/frameworks/native/neural_network_runtime/nnbackend.h b/frameworks/native/neural_network_runtime/nnbackend.h index 826817010216f3bb3ddaafb8c373ee1d7f4d62c5..83a4af85fab211e0188faf4d8ae34b1cc7bea561 100644 --- a/frameworks/native/neural_network_runtime/nnbackend.h +++ b/frameworks/native/neural_network_runtime/nnbackend.h @@ -54,11 +54,11 @@ public: OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr<const mindspore::lite::LiteGraph> model, std::vector<bool>& ops); -private: +protected: std::shared_ptr<Device> m_device; size_t m_backendID; }; } // NeuralNetworkRuntime } // OHOS -#endif // NEURAL_NETWORK_RUNTIME_NNBACKEND_H \ No newline at end of file +#endif //
NEURAL_NETWORK_RUNTIME_NNBACKEND_H diff --git a/frameworks/native/neural_network_runtime/nncompiler.cpp b/frameworks/native/neural_network_runtime/nncompiler.cpp index bc7a40987b5e539b60ee913832b1da848bdcae84..56ab88632363f1d65b9d2a3514243a5a4cd9f8a4 100644 --- a/frameworks/native/neural_network_runtime/nncompiler.cpp +++ b/frameworks/native/neural_network_runtime/nncompiler.cpp @@ -377,7 +377,7 @@ OH_NN_ReturnCode NNCompiler::NormalBuild() ModelConfig config {m_enableFp16, static_cast<OH_NN_PerformanceMode>(m_performance), static_cast<OH_NN_Priority>(m_priority), m_isProfiling, m_cachePath, m_opLayouts}; if (m_liteGraph != nullptr) { - ret = m_device->PrepareModel(m_liteGraph, config, m_preparedModel); + ret = m_device->PrepareModel(m_liteGraph, m_quantBuffer, config, m_preparedModel); } if (m_metaGraph != nullptr) { ret = m_device->PrepareModel(m_metaGraph, m_quantBuffer, config, m_preparedModel); @@ -856,4 +856,4 @@ OH_NN_ReturnCode NNCompiler::DeserializedTensorsFromBuffer( } } // NeuralNetworkRuntime -} // OHOS \ No newline at end of file +} // OHOS