diff --git a/frameworks/native/compilation.cpp b/frameworks/native/compilation.cpp
index e715b934b87d5fbd758a393d9aa68c436c4675af..1d54550dacf65dddbb4f39576f966b2daba25de7 100644
--- a/frameworks/native/compilation.cpp
+++ b/frameworks/native/compilation.cpp
@@ -606,21 +606,9 @@ OH_NN_ReturnCode Compilation::LoadCacheBuild(std::shared_ptr<PreparedModel>& pre
     return OH_NN_SUCCESS;
 }
 
-OH_NN_ReturnCode Compilation::InnerBuild()
+OH_NN_ReturnCode Compilation::BuildCacheModel(std::shared_ptr<PreparedModel>& preparedModel)
 {
     OH_NN_ReturnCode ret;
-    std::shared_ptr<PreparedModel> preparedModel;
-    if (m_cachePath.empty()) {
-        ret = NormalBuild(preparedModel);
-        if (ret != OH_NN_SUCCESS) {
-            LOGE("Fail to normally build.");
-            return ret;
-        }
-
-        m_isBuild = true;
-        return OH_NN_SUCCESS;
-    }
-
     std::string cacheInfoPath = m_cachePath + "cache_info.nncache";
     if (access(cacheInfoPath.c_str(), 0) != 0) {
         ret = GenCacheBuild(preparedModel);
@@ -660,6 +648,51 @@ OH_NN_ReturnCode Compilation::InnerBuild()
     }
 
     m_isBuild = true;
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode Compilation::InnerBuild()
+{
+    OH_NN_ReturnCode ret;
+    std::shared_ptr<PreparedModel> preparedModel;
+
+    // Prepare from offline model.
+    bool isOfflineModel {false};
+    ret = IsOfflineModel(isOfflineModel);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("[Compilation] Failed when identifying the offline model.");
+        return ret;
+    }
+
+    if (isOfflineModel) {
+        ret = BuildOfflineModel(preparedModel);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("[Compilation] Failed to build offline model.");
+            return ret;
+        }
+
+        m_isBuild = true;
+        return OH_NN_SUCCESS;
+    }
+
+    if (m_cachePath.empty()) {
+        ret = NormalBuild(preparedModel);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("Failed to build the model normally.");
+            return ret;
+        }
+
+        m_isBuild = true;
+        return OH_NN_SUCCESS;
+    }
+
+    ret = BuildCacheModel(preparedModel);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("Failed to build the model from cache.");
+        return ret;
+    }
+
     return OH_NN_SUCCESS;
 }
 
@@ -714,5 +747,56 @@ bool Compilation::IsDynamicShape() const
     }
     return false;
 }
+
+OH_NN_ReturnCode Compilation::IsOfflineModel(bool& isOfflineModel) const
+{
+    isOfflineModel = false; // Initialize the returned value
+    if (m_liteGraph == nullptr) {
+        LOGE("[Compilation] LiteGraph is empty when identifying the offline model.");
+        return OH_NN_NULL_PTR;
+    }
+
+    if (m_liteGraph->all_nodes_.size() == 0) {
+        LOGE("[Compilation] Find empty node in the model.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    // If the model consists of more than one node, it is not considered an offline model.
+    if (m_liteGraph->all_nodes_.size() > 1) {
+        isOfflineModel = false;
+        return OH_NN_SUCCESS;
+    }
+
+    const mindspore::lite::LiteGraph::Node* pNode = m_liteGraph->all_nodes_[0];
+    if (pNode == nullptr) {
+        LOGE("[Compilation] Find invalid node in the model.");
+        return OH_NN_NULL_PTR;
+    }
+
+    const mindspore::lite::NodeType& nodeType = mindspore::lite::MindIR_Primitive_GetType(pNode->primitive_);
+    if (nodeType == mindspore::lite::NodeType::NODE_TYPE_CUSTOM) {
+        isOfflineModel = true;
+    }
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode Compilation::BuildOfflineModel(std::shared_ptr<PreparedModel>& preparedModel)
+{
+    ModelConfig config {m_enableFp16, m_performance, m_priority};
+    OH_NN_ReturnCode ret = m_device->PrepareOfflineModel(m_liteGraph, config, preparedModel);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("[Compilation] Preparing model failed when building from offline model.");
+        return ret;
+    }
+
+    m_executionPlan = CreateSharedPtr<ExecutionPlan>(preparedModel, m_device);
+    if (m_executionPlan == nullptr) {
+        LOGE("[Compilation] Failed to create ExecutionPlan when building from offline model.");
+        return OH_NN_MEMORY_ERROR;
+    }
+
+    return OH_NN_SUCCESS;
+}
 } // namespace NeuralNetworkRuntime
 } // namespace OHOS
\ No newline at end of file
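The single-Custom-node rule above is the hinge of the whole patch, so here is the same check as a minimal standalone sketch; the free function and its name are illustrative only, not part of the patch:

    #include "mindir.h"

    // Mirrors Compilation::IsOfflineModel: a graph counts as an offline model
    // exactly when it holds a single node and that node is a NODE_TYPE_CUSTOM
    // primitive wrapping a vendor-compiled blob.
    bool LooksLikeOfflineModel(const mindspore::lite::LiteGraph& graph)
    {
        if (graph.all_nodes_.size() != 1 || graph.all_nodes_[0] == nullptr) {
            return false; // empty or multi-node graphs take the normal build path
        }
        return mindspore::lite::MindIR_Primitive_GetType(graph.all_nodes_[0]->primitive_) ==
               mindspore::lite::NodeType::NODE_TYPE_CUSTOM;
    }
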
diff --git a/frameworks/native/compilation.h b/frameworks/native/compilation.h
index a85f6a50294257721020dfd91c0ba68de598395f..5fa3099a71d1082bda235eee77f9394cc0a57206 100644
--- a/frameworks/native/compilation.h
+++ b/frameworks/native/compilation.h
@@ -74,11 +74,14 @@ OH_NN_ReturnCode CheckCacheModel(const ModelCacheInfo& modelCacheInfo,
         std::vector<ModelBuffer>& modelBuffers) const;
     OH_NN_ReturnCode NormalBuild(std::shared_ptr<PreparedModel>& preparedModel);
+    OH_NN_ReturnCode BuildCacheModel(std::shared_ptr<PreparedModel>& preparedModel);
     OH_NN_ReturnCode GenCacheBuild(std::shared_ptr<PreparedModel>& preparedModel);
     OH_NN_ReturnCode ReGenCacheBuild(uint32_t fileNumber, std::shared_ptr<PreparedModel>& preparedModel);
     OH_NN_ReturnCode LoadCacheBuild(std::shared_ptr<PreparedModel>& preparedModel, const ModelCacheInfo& cacheInfo);
     OH_NN_ReturnCode InnerBuild();
     OH_NN_ReturnCode GetCacheFileLength(std::ifstream& ifs, int& fsize) const;
+    OH_NN_ReturnCode IsOfflineModel(bool& isOfflineModel) const;
+    OH_NN_ReturnCode BuildOfflineModel(std::shared_ptr<PreparedModel>& preparedModel);
 };
 } // namespace NeuralNetworkRuntime
 } // namespace OHOS
diff --git a/frameworks/native/device.h b/frameworks/native/device.h
index c34e0432d139de3c9d54934b8f9f2a10620746d8..e4ae27782a1902980ebd83a24daff3a5d1dae77c 100644
--- a/frameworks/native/device.h
+++ b/frameworks/native/device.h
@@ -52,6 +52,9 @@ virtual OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector<ModelBuffer>& modelCache,
                                                         const ModelConfig& config,
                                                         std::shared_ptr<PreparedModel>& preparedModel) = 0;
+    virtual OH_NN_ReturnCode PrepareOfflineModel(std::shared_ptr<const mindspore::lite::LiteGraph> model,
+                                                 const ModelConfig& config,
+                                                 std::shared_ptr<PreparedModel>& preparedModel) = 0;
 
     virtual void* AllocateBuffer(size_t length) = 0;
     virtual OH_NN_ReturnCode ReleaseBuffer(const void* buffer) = 0;
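Every backend now has to answer the new pure-virtual PrepareOfflineModel hook. A stripped-down mirror of that contract, with hypothetical Mini* types standing in for Device and PreparedModel:

    #include <memory>

    struct MiniPreparedModel {};

    struct MiniDevice {
        virtual ~MiniDevice() = default;
        // New hook: build directly from a vendor-compiled (offline) model.
        virtual int PrepareOfflineModel(std::shared_ptr<MiniPreparedModel>& prepared) = 0;
    };

    // A backend without offline-model support simply refuses, which is exactly
    // what HDIDeviceV1_0 does further down in this patch.
    struct LegacyMiniDevice : MiniDevice {
        int PrepareOfflineModel(std::shared_ptr<MiniPreparedModel>&) override
        {
            return -1; // stands in for OH_NN_OPERATION_FORBIDDEN
        }
    };
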
diff --git a/frameworks/native/device_discover.h b/frameworks/native/device_discover.h
index fd79e65352993ff38b3b4c3c607487bed8127269..ae614759cb6995fbb02e2adaba59ad3718e30c7c 100644
--- a/frameworks/native/device_discover.h
+++ b/frameworks/native/device_discover.h
@@ -16,9 +16,6 @@
 #ifndef NEURAL_NETWORK_RUNTIME_DEVICE_DISCOVER_H
 #define NEURAL_NETWORK_RUNTIME_DEVICE_DISCOVER_H
 
-#include <string>
-#include <memory>
-
 #include "device.h"
 
 namespace OHOS {
@@ -27,4 +24,4 @@ std::shared_ptr<Device> DiscoverHDIDevicesV1_0(std::string& deviceName, std::str
 std::shared_ptr<Device> DiscoverHDIDevicesV2_0(std::string& deviceName, std::string& vendorName, std::string& version);
 } // namespace NeuralNetworkRuntime
 } // namespace OHOS
-#endif // NEURAL_NETWORK_RUNTIME_DEVICE_DISCOVER_H
\ No newline at end of file
+#endif // NEURAL_NETWORK_RUNTIME_DEVICE_DISCOVER_H
diff --git a/frameworks/native/device_discover_v2_0.cpp b/frameworks/native/device_discover_v2_0.cpp
index de5e8b792aa8c3cf088a9195cabf61808589b52e..7b0ad86aaf87d2bd5437b9f992bdaac9b0935d61 100644
--- a/frameworks/native/device_discover_v2_0.cpp
+++ b/frameworks/native/device_discover_v2_0.cpp
@@ -15,6 +15,7 @@
 
 #include "device_discover.h"
 #include "hdi_device_v2_0.h"
+#include "hdi_returncode_utils.h"
 
 #include "common/log.h"
 #include "common/utils.h"
@@ -29,20 +30,37 @@ std::shared_ptr<Device> DiscoverHDIDevicesV2_0(std::string& deviceName, std::str
         return nullptr;
     }
 
-    auto hdiRet = iDevice->GetDeviceName(deviceName);
-    if (hdiRet != HDF_SUCCESS) {
-        LOGW("Get device name failed. ErrorCode=%d", hdiRet);
+    auto ret = iDevice->GetDeviceName(deviceName);
+    if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
+        if (ret < V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
+            LOGW("Get device name failed. An error occurred in HDI, errorcode is %{public}d.", ret);
+        } else {
+            OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast<OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode>(ret);
+            LOGW("Get device name failed. Errorcode is %{public}s.", ConverterRetToString(nnrtRet).c_str());
+        }
         return nullptr;
     }
-    hdiRet = iDevice->GetVendorName(vendorName);
-    if (hdiRet != HDF_SUCCESS) {
-        LOGW("Get vendor name failed. ErrorCode=%d", hdiRet);
+
+    ret = iDevice->GetVendorName(vendorName);
+    if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
+        if (ret < V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
+            LOGW("Get vendor name failed. An error occurred in HDI, errorcode is %{public}d.", ret);
+        } else {
+            OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast<OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode>(ret);
+            LOGW("Get vendor name failed. Errorcode is %{public}s.", ConverterRetToString(nnrtRet).c_str());
+        }
         return nullptr;
     }
+
     std::pair<uint32_t, uint32_t> hdiVersion;
-    hdiRet = iDevice->GetVersion(hdiVersion.first, hdiVersion.second);
-    if (hdiRet != HDF_SUCCESS) {
-        LOGW("Get version failed. ErrorCode=%d", hdiRet);
+    ret = iDevice->GetVersion(hdiVersion.first, hdiVersion.second);
+    if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
+        if (ret < V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
+            LOGW("Get version failed. An error occurred in HDI, errorcode is %{public}d.", ret);
+        } else {
+            OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast<OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode>(ret);
+            LOGW("Get version failed. Errorcode is %{public}s.", ConverterRetToString(nnrtRet).c_str());
+        }
         return nullptr;
     }
     version = 'v' + std::to_string(hdiVersion.first) + '_' + std::to_string(hdiVersion.second);
@@ -54,4 +72,4 @@ std::shared_ptr<Device> DiscoverHDIDevicesV2_0(std::string& deviceName, std::str
     return device;
 }
 } // namespace NeuralNetworkRuntime
-} // namespace OHOS
\ No newline at end of file
+} // namespace OHOS
diff --git a/frameworks/native/execution_plan.cpp b/frameworks/native/execution_plan.cpp
index 5199199bfd9cf4c11a8d0c58cddcfc2108743ead..a07449286d25c5f7c86f5de6e7a6e888afdc26c8 100644
--- a/frameworks/native/execution_plan.cpp
+++ b/frameworks/native/execution_plan.cpp
@@ -62,19 +62,6 @@ OH_NN_ReturnCode ExecutionPlan::Run(const std::vector<std::shared_ptr<NNTensor>>
         return ret;
     }
 
-    // Check if the output buffer is sufficient
-    bool bufferFailed {false};
-    for (size_t i = 0; i < outputSize; ++i) {
-        if (!isSufficientDataBuffer[i]) {
-            // Print all output indices with insufficient buffer, don't return until traversing all outputs.
-            LOGE("Run failed, Output %zu does not have enough buffer to store the data.", i);
-            bufferFailed = true;
-        }
-    }
-    if (bufferFailed) {
-        return OH_NN_FAILED;
-    }
-
     // Set the output NNTensor's dimensions from output IOTensor if it is dynamic.
     // NNTensor::SetDimensions will check if the tensor buffer is enough for the new dimensions.
     for (size_t i = 0; i < outputSize; ++i) {
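The `ret < NNRT_SUCCESS` / `ret > NNRT_SUCCESS` branches encode the V2.0 status convention: negative values are transport-level errors raised by the HDI/IPC layer itself, positive values are NNRT_ReturnCode members with printable names. A self-contained sketch of that convention, with plain ints and a hypothetical logger:

    #include <cstdint>
    #include <cstdio>

    constexpr int32_t kNnrtSuccess = 0; // mirrors V2_0::NNRT_ReturnCode::NNRT_SUCCESS

    void LogDeviceStatus(int32_t ret)
    {
        if (ret < kNnrtSuccess) {
            std::printf("HDI transport error, errorcode is %d\n", ret);  // raw HDF error
        } else if (ret > kNnrtSuccess) {
            std::printf("NNRt error, NNRT_ReturnCode is %d\n", ret);     // map to a name in real code
        }
    }
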
- LOGE("Run failed, Output %zu does not have enough buffer to store the data.", i); - bufferFailed = true; - } - } - if (bufferFailed) { - return OH_NN_FAILED; - } - // Set the output NNTensor's dimensions from output IOTensor if it is dynamic. // NNTensor::SetDimensions will check if the tensor buffer is enough for the new dimensions. for (size_t i = 0; i < outputSize; ++i) { diff --git a/frameworks/native/hdi_device_v1_0.cpp b/frameworks/native/hdi_device_v1_0.cpp index 146eb3dcca4412dbacd65878ca3f0b4f938a9919..110485e267f06dd44dcaef4263dba73a4bed9db2 100644 --- a/frameworks/native/hdi_device_v1_0.cpp +++ b/frameworks/native/hdi_device_v1_0.cpp @@ -87,9 +87,7 @@ V1_0::Priority TransPriority(const OH_NN_Priority& priority) } HDIDeviceV1_0::HDIDeviceV1_0(OHOS::sptr device) : m_iDevice(device) -{ - device->GetVersion(m_hdiVersion.first, m_hdiVersion.second); -} +{} OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceName(std::string& name) { @@ -113,6 +111,11 @@ OH_NN_ReturnCode HDIDeviceV1_0::GetVendorName(std::string& name) OH_NN_ReturnCode HDIDeviceV1_0::GetVersion(std::string& version) { + auto ret = m_iDevice->GetVersion(m_hdiVersion.first, m_hdiVersion.second); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI version failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } version = 'v' + std::to_string(m_hdiVersion.first) + '_' + std::to_string(m_hdiVersion.second); return OH_NN_SUCCESS; } @@ -150,7 +153,7 @@ OH_NN_ReturnCode HDIDeviceV1_0::GetSupportedOperation(std::shared_ptr 0) { @@ -241,7 +244,7 @@ OH_NN_ReturnCode HDIDeviceV1_0::PrepareModel(std::shared_ptr 0) { @@ -391,5 +394,13 @@ OH_NN_ReturnCode HDIDeviceV1_0::ReleaseSharedBuffer(const V1_0::SharedBuffer& bu } return OH_NN_SUCCESS; } + +OH_NN_ReturnCode HDIDeviceV1_0::PrepareOfflineModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + LOGE("HDIDeviceV1.0 not support PrepareOfflineModel."); + return OH_NN_OPERATION_FORBIDDEN; +} } // namespace NeuralNetworkRuntime } // namespace OHOS diff --git a/frameworks/native/hdi_device_v1_0.h b/frameworks/native/hdi_device_v1_0.h index 3d43aa17d9c049a0c44e9f4f9c587e53325bbe5f..740835d9b0d91d7f93e5a04397d4e5b0dd448529 100644 --- a/frameworks/native/hdi_device_v1_0.h +++ b/frameworks/native/hdi_device_v1_0.h @@ -50,6 +50,9 @@ public: OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, std::shared_ptr& preparedModel) override; + OH_NN_ReturnCode PrepareOfflineModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) override; void* AllocateBuffer(size_t length) override; OH_NN_ReturnCode ReleaseBuffer(const void* buffer) override; diff --git a/frameworks/native/hdi_device_v2_0.cpp b/frameworks/native/hdi_device_v2_0.cpp index 8458394357f976722a55778a29a2b97aa3f389fd..f2a8865209e45bb16d8e4417bdac5e90abec1528 100644 --- a/frameworks/native/hdi_device_v2_0.cpp +++ b/frameworks/native/hdi_device_v2_0.cpp @@ -17,8 +17,10 @@ #include "hdf_base.h" #include "mindir.h" +#include "securec.h" #include "hdi_prepared_model_v2_0.h" +#include "hdi_returncode_utils.h" #include "memory_manager.h" #include "transform.h" #include "common/log.h" @@ -26,6 +28,8 @@ namespace OHOS { namespace NeuralNetworkRuntime { +const size_t OFFLINE_MODEL_MINIMUM_INPUT_SIZE = 2; + namespace { OH_NN_DeviceType TransHDIDeviceV2_0Type(const V2_0::DeviceType& iDeviceType) { @@ -84,19 +88,49 @@ V2_0::Priority TransPriority(const OH_NN_Priority& priority) return V2_0::Priority::PRIORITY_NONE; } } -} 
 
-HDIDeviceV2_0::HDIDeviceV2_0(OHOS::sptr<V2_0::INnrtDevice> device) : m_iDevice(device)
+OH_NN_ReturnCode IsOfflineModel(std::shared_ptr<const mindspore::lite::LiteGraph> liteGraph, bool& isOfflineModel)
 {
-    device->GetVersion(m_hdiVersion.first, m_hdiVersion.second);
+    isOfflineModel = false; // Initialize the returned value
+    if (liteGraph == nullptr) {
+        LOGE("LiteGraph is empty when identifying the offline model.");
+        return OH_NN_NULL_PTR;
+    }
+
+    if (liteGraph->all_nodes_.size() == 0) {
+        LOGE("Find empty node in the model.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    // If the model consists of more than one node, it is not considered an offline model.
+    if (liteGraph->all_nodes_.size() > 1) {
+        isOfflineModel = false;
+        return OH_NN_SUCCESS;
+    }
+
+    const mindspore::lite::LiteGraph::Node* pNode = liteGraph->all_nodes_[0];
+    if (pNode == nullptr) {
+        LOGE("Find invalid node in the model.");
+        return OH_NN_NULL_PTR;
+    }
+
+    const mindspore::lite::NodeType& nodeType = mindspore::lite::MindIR_Primitive_GetType(pNode->primitive_);
+    if (nodeType == mindspore::lite::NodeType::NODE_TYPE_CUSTOM) {
+        isOfflineModel = true;
+    }
+
+    return OH_NN_SUCCESS;
 }
+} // unnamed namespace
+
+HDIDeviceV2_0::HDIDeviceV2_0(OHOS::sptr<V2_0::INnrtDevice> device) : m_iDevice(device)
+{}
 
 OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceName(std::string& name)
 {
     auto ret = m_iDevice->GetDeviceName(name);
-    if (ret != HDF_SUCCESS) {
-        LOGE("Get HDI device name failed. ErrorCode=%d", ret);
-        return OH_NN_UNAVALIDABLE_DEVICE;
+    if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
+        return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Get HDI device name failed");
     }
     return OH_NN_SUCCESS;
 }
@@ -104,15 +138,18 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceName(std::string& name)
 OH_NN_ReturnCode HDIDeviceV2_0::GetVendorName(std::string& name)
 {
     auto ret = m_iDevice->GetVendorName(name);
-    if (ret != HDF_SUCCESS) {
-        LOGE("Get HDI device vendor name failed. ErrorCode=%d", ret);
-        return OH_NN_UNAVALIDABLE_DEVICE;
+    if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
+        return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Get HDI vendor name failed");
     }
     return OH_NN_SUCCESS;
 }
 
 OH_NN_ReturnCode HDIDeviceV2_0::GetVersion(std::string& version)
 {
+    auto ret = m_iDevice->GetVersion(m_hdiVersion.first, m_hdiVersion.second);
+    if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
+        return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Get HDI version failed");
+    }
     version = 'v' + std::to_string(m_hdiVersion.first) + '_' + std::to_string(m_hdiVersion.second);
     return OH_NN_SUCCESS;
 }
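Both device wrappers stop calling GetVersion in their constructors: a constructor cannot report the failure, while the GetVersion method can. The shape of that change in isolation, with a hypothetical Service type:

    #include <cstdint>
    #include <string>
    #include <utility>

    struct Service {
        // Stand-in for the HDI call; failure is reported through the return value.
        bool GetVersion(uint32_t& major, uint32_t& minor) { major = 2; minor = 0; return true; }
    };

    class VersionedClient {
    public:
        explicit VersionedClient(Service* service) : m_service(service) {} // no I/O anymore

        bool GetVersion(std::string& version)
        {
            std::pair<uint32_t, uint32_t> v;
            if (!m_service->GetVersion(v.first, v.second)) {
                return false; // the failure now reaches the caller
            }
            version = 'v' + std::to_string(v.first) + '_' + std::to_string(v.second);
            return true;
        }

    private:
        Service* m_service;
    };
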
ErrorCode=%d", ret); - return OH_NN_UNAVALIDABLE_DEVICE; + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Get HDI device status failed"); } status = TransHDIDeviceV2_0Status(iDeviceStatus); return OH_NN_SUCCESS; @@ -150,14 +185,27 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptr 0) { - hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); - if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { - LOGE("Allocate tensor buffer error when get supported operation. ErrorCode: %d", hdiRet); - return OH_NN_FAILED; + ret = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS || tensorBuffer.fd == INVALID_FD) { + return CheckReturnCode(ret, OH_NN_FAILED, "Allocate tensor buffer error when get supported operation"); } } @@ -168,17 +216,16 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptrGetSupportedOperation(*iModel, ops); + ret = m_iDevice->GetSupportedOperation(*iModel, ops); mindspore::lite::MindIR_Model_Destroy(&iModel); - auto ret = ReleaseSharedBuffer(tensorBuffer); - if (ret != OH_NN_SUCCESS) { + innerRet = ReleaseSharedBuffer(tensorBuffer); + if (innerRet != OH_NN_SUCCESS) { LOGE("Release tensorBuffer failed."); return OH_NN_FAILED; } - if (hdiRet != HDF_SUCCESS) { - LOGE("Get supported operation failed. ErrorCode=%d", hdiRet); - return OH_NN_UNAVALIDABLE_DEVICE; + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Get supported operation failed"); } return OH_NN_SUCCESS; } @@ -186,9 +233,8 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptrIsFloat16PrecisionSupported(isSupported); - if (ret != HDF_SUCCESS) { - LOGE("Query fp16 precision supported failed. ErrorCode=%d", ret); - return OH_NN_UNAVALIDABLE_DEVICE; + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Query fp16 precision supported failed"); } return OH_NN_SUCCESS; } @@ -196,9 +242,8 @@ OH_NN_ReturnCode HDIDeviceV2_0::IsFloat16PrecisionSupported(bool& isSupported) OH_NN_ReturnCode HDIDeviceV2_0::IsPerformanceModeSupported(bool& isSupported) { auto ret = m_iDevice->IsPerformanceModeSupported(isSupported); - if (ret != HDF_SUCCESS) { - LOGE("Query performance mode supported failed. ErrorCode=%d", ret); - return OH_NN_UNAVALIDABLE_DEVICE; + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Query performance mode supported failed"); } return OH_NN_SUCCESS; } @@ -206,9 +251,8 @@ OH_NN_ReturnCode HDIDeviceV2_0::IsPerformanceModeSupported(bool& isSupported) OH_NN_ReturnCode HDIDeviceV2_0::IsPrioritySupported(bool& isSupported) { auto ret = m_iDevice->IsPrioritySupported(isSupported); - if (ret != HDF_SUCCESS) { - LOGE("Query priority supported failed. ErrorCode=%d", ret); - return OH_NN_UNAVALIDABLE_DEVICE; + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Query priority supported failed"); } return OH_NN_SUCCESS; } @@ -216,9 +260,8 @@ OH_NN_ReturnCode HDIDeviceV2_0::IsPrioritySupported(bool& isSupported) OH_NN_ReturnCode HDIDeviceV2_0::IsDynamicInputSupported(bool& isSupported) { auto ret = m_iDevice->IsDynamicInputSupported(isSupported); - if (ret != HDF_SUCCESS) { - LOGE("Query dynamic input supported failed. 
ErrorCode=%d", ret); - return OH_NN_UNAVALIDABLE_DEVICE; + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Query dynamic input supported failed"); } return OH_NN_SUCCESS; } @@ -226,9 +269,8 @@ OH_NN_ReturnCode HDIDeviceV2_0::IsDynamicInputSupported(bool& isSupported) OH_NN_ReturnCode HDIDeviceV2_0::IsModelCacheSupported(bool& isSupported) { auto ret = m_iDevice->IsModelCacheSupported(isSupported); - if (ret != HDF_SUCCESS) { - LOGE("Query cache model supported failed. ErrorCode=%d", ret); - return OH_NN_UNAVALIDABLE_DEVICE; + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Query cache model supported failed"); } return OH_NN_SUCCESS; } @@ -241,14 +283,13 @@ OH_NN_ReturnCode HDIDeviceV2_0::PrepareModel(std::shared_ptr 0) { - hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); - if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { - LOGE("Allocate tensor buffer error when prepare model. ErrorCode: %d", hdiRet); - return OH_NN_FAILED; + ret = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS || tensorBuffer.fd == INVALID_FD) { + return CheckReturnCode(ret, OH_NN_FAILED, "Allocate tensor buffer error when prepare model"); } } @@ -265,17 +306,16 @@ OH_NN_ReturnCode HDIDeviceV2_0::PrepareModel(std::shared_ptr iPreparedModel; - auto preparedRet = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel); + ret = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel); mindspore::lite::MindIR_Model_Destroy(&iModel); - auto ret = ReleaseSharedBuffer(tensorBuffer); - if (ret != OH_NN_SUCCESS) { + auto innerRet = ReleaseSharedBuffer(tensorBuffer); + if (innerRet != OH_NN_SUCCESS) { LOGE("Release tensorBuffer failed."); return OH_NN_FAILED; } - if (preparedRet != HDF_SUCCESS || iPreparedModel == nullptr) { - LOGE("Prepare model failed. ErrorCode=%d", preparedRet); - return OH_NN_FAILED; + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS || iPreparedModel == nullptr) { + return CheckReturnCode(ret, OH_NN_FAILED, "Prepare model failed"); } preparedModel = CreateSharedPtr(iPreparedModel); @@ -298,7 +338,7 @@ OH_NN_ReturnCode HDIDeviceV2_0::PrepareModelFromModelCache(const std::vectorGetMemory(modelCache[i].buffer, memory); if (ret != OH_NN_SUCCESS) { - LOGE("The %zuth model cache is invalid. Please put valid model cache.", i + 1); + LOGE("The %{public}zuth model cache is invalid. Please put valid model cache.", i + 1); return ret; } iBuffers.emplace_back(V2_0::SharedBuffer {memory.fd, memory.length, 0, memory.length}); @@ -310,10 +350,9 @@ OH_NN_ReturnCode HDIDeviceV2_0::PrepareModelFromModelCache(const std::vector iPreparedModel; - auto hdiRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel); - if (hdiRet != HDF_SUCCESS) { - LOGE("Prepare model from cache failed. ErrorCode=%d", hdiRet); - return OH_NN_UNAVALIDABLE_DEVICE; + auto nnrtRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel); + if (nnrtRet != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(nnrtRet, OH_NN_FAILED, "Prepare model from cache failed"); } preparedModel = CreateSharedPtr(iPreparedModel); @@ -333,9 +372,8 @@ void* HDIDeviceV2_0::AllocateBuffer(size_t length) V2_0::SharedBuffer buffer; auto ret = m_iDevice->AllocateBuffer(length, buffer); - if (ret != HDF_SUCCESS) { - LOGE("Allocate buffer error. 
ErrorCode: %d", ret); - return nullptr; + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, nullptr, "Allocate buffer error"); } auto memManager = MemoryManager::GetInstance(); @@ -363,9 +401,8 @@ OH_NN_ReturnCode HDIDeviceV2_0::ReleaseBuffer(const void* buffer) V2_0::SharedBuffer hdiBuffer {memory.fd, memory.length, 0, memory.length}; auto deviceResult = m_iDevice->ReleaseBuffer(hdiBuffer); - if (deviceResult != HDF_SUCCESS) { - LOGE("Device release buffer error. ErrorCode: %d", deviceResult); - return OH_NN_FAILED; + if (deviceResult != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(deviceResult, OH_NN_FAILED, "Device release buffer error"); } ret = memManager->UnMapMemory(buffer); @@ -380,16 +417,221 @@ OH_NN_ReturnCode HDIDeviceV2_0::ReleaseBuffer(const void* buffer) OH_NN_ReturnCode HDIDeviceV2_0::ReleaseSharedBuffer(const V2_0::SharedBuffer& buffer) { if (buffer.fd == INVALID_FD) { - LOGI("No need to release. fd=%d", INVALID_FD); + LOGI("No need to release. fd=%{public}d", INVALID_FD); return OH_NN_SUCCESS; } auto ret = m_iDevice->ReleaseBuffer(buffer); - if (ret != HDF_SUCCESS) { - LOGE("Device release buffer error. ErrorCode=%d", ret); - return OH_NN_FAILED; + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_FAILED, "Device release buffer error"); + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetOfflineModelFromLiteGraph(std::shared_ptr graph, + std::vector>& offlineModels) +{ + // graph has been checked in PrepareOfflineModel, no need to check twice. + offlineModels.clear(); + + const size_t inputNum = graph->all_nodes_[0]->input_indices_.size(); + if (inputNum < OFFLINE_MODEL_MINIMUM_INPUT_SIZE) { + LOGE("LiteGraph with offline model should have at least two input tensors, only get %zu.", inputNum); + return OH_NN_INVALID_PARAMETER; + } + + // The offline model is integrated into the last input tensor. + uint32_t index = graph->all_nodes_[0]->input_indices_[inputNum - 1]; + mindspore::lite::TensorPtr pTensor = graph->all_tensors_[index]; + std::vector offlineModel = mindspore::lite::MindIR_Tensor_GetData(pTensor); + if (offlineModel.size() == (size_t) 0) { + LOGE("Offline model has size of 0, please check the ms model."); + return OH_NN_INVALID_PARAMETER; + } + offlineModels.emplace_back(std::move(offlineModel)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::AllocateDeviceBufferForOfflineModel( + const std::vector>& offlineModels, std::vector& deviceBuffers) +{ + // offlineModels is guaranteed to have at least one element in GetOfflineModelFromLiteGraph, no need to check size. + deviceBuffers.clear(); + + for (const std::vector& offlineModel : offlineModels) { + const size_t offlineModelSize = offlineModel.size(); + + void* newModelBuffer = AllocateBuffer(offlineModelSize); + if (newModelBuffer == nullptr) { + // Release allocated model buffer if error happens. 
+OH_NN_ReturnCode HDIDeviceV2_0::PrepareOfflineModel(std::vector<ModelBuffer>& deviceBuffers,
+                                                    const ModelConfig& config,
+                                                    const std::map<std::string, std::vector<int8_t>> extensions,
+                                                    std::shared_ptr<PreparedModel>& preparedModel)
+{
+    V2_0::ModelConfig iModelConfig;
+    iModelConfig.enableFloat16 = config.enableFloat16;
+    iModelConfig.mode = TransPerformanceMode(config.mode);
+    iModelConfig.priority = TransPriority(config.priority);
+    iModelConfig.extensions = extensions;
+    OHOS::sptr<V2_0::IPreparedModel> iPreparedModel;
+
+    std::vector<V2_0::SharedBuffer> iBuffers;
+    auto memManager = MemoryManager::GetInstance();
+    Memory memory;
+    OH_NN_ReturnCode ret;
+    size_t numOfflineModel = deviceBuffers.size();
+    for (size_t i = 0; i < numOfflineModel; i++) {
+        ret = memManager->GetMemory(deviceBuffers[i].buffer, memory);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("Failed to retrieve the memory of the %zuth device buffer.", i);
+            return ret;
+        }
+        iBuffers.emplace_back(V2_0::SharedBuffer {memory.fd, memory.length, 0, memory.length});
+    }
+
+    auto preparedRet = m_iDevice->PrepareOfflineModel(iBuffers, iModelConfig, iPreparedModel);
+
+    // Release allocated model buffer after prepare model.
+    OH_NN_ReturnCode status {OH_NN_SUCCESS};
+    for (const ModelBuffer& deviceBuffer : deviceBuffers) {
+        status = ReleaseBuffer(deviceBuffer.buffer);
+        if (status != OH_NN_SUCCESS) {
+            LOGE("Release shared buffer of offline model failed.");
+            return status;
+        }
+    }
+    deviceBuffers.clear();
+
+    if (preparedRet != V2_0::NNRT_ReturnCode::NNRT_SUCCESS || iPreparedModel == nullptr) {
+        return CheckReturnCode(preparedRet, OH_NN_FAILED, "Prepare offline model failed");
+    }
+
+    preparedModel = CreateSharedPtr<HDIPreparedModelV2_0>(iPreparedModel);
+    if (preparedModel == nullptr) {
+        LOGE("Prepare model failed, failed to create preparedModel instance.");
+        return OH_NN_MEMORY_ERROR;
+    }
+
+    return OH_NN_SUCCESS;
+}
+
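The release-all-buffers loop above now appears three times in this file. Not part of the patch, but the pattern could be collapsed into a small scope guard; a hedged sketch:

    #include <functional>
    #include <utility>

    // Runs the stored callback on scope exit unless Dismiss() was called first.
    class ScopeGuard {
    public:
        explicit ScopeGuard(std::function<void()> onExit) : m_onExit(std::move(onExit)) {}
        ~ScopeGuard()
        {
            if (m_onExit) {
                m_onExit();
            }
        }
        void Dismiss() { m_onExit = nullptr; }

    private:
        std::function<void()> m_onExit;
    };

A guard constructed right after AllocateDeviceBufferForOfflineModel succeeds would release the buffers on every early return, and could be dismissed once ownership passes to the driver.
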
+OH_NN_ReturnCode HDIDeviceV2_0::PrepareOfflineModel(std::shared_ptr<const mindspore::lite::LiteGraph> model,
+                                                    const ModelConfig& config,
+                                                    std::shared_ptr<PreparedModel>& preparedModel)
+{
+    if (model == nullptr) {
+        LOGE("LiteGraph is empty when identifying the offline model.");
+        return OH_NN_NULL_PTR;
+    }
+
+    std::vector<std::vector<uint8_t>> offlineModels;
+    OH_NN_ReturnCode status = GetOfflineModelFromLiteGraph(model, offlineModels);
+    if (status != OH_NN_SUCCESS) {
+        LOGE("Error happens when getting offline models from lite graph.");
+        return status;
+    }
+
+    std::vector<ModelBuffer> deviceBuffers;
+    status = AllocateDeviceBufferForOfflineModel(offlineModels, deviceBuffers);
+    if (status != OH_NN_SUCCESS) {
+        LOGE("Error happens when allocating device buffers for offline model.");
+        return status;
+    }
+
+    status = CopyOfflineModelToDevice(offlineModels, deviceBuffers);
+    if (status != OH_NN_SUCCESS) {
+        LOGE("Error happened when copying offline models to device buffers.");
+
+        OH_NN_ReturnCode ret {OH_NN_SUCCESS};
+        // Release allocated model buffer if error happens.
+        for (const ModelBuffer& deviceBuffer : deviceBuffers) {
+            ret = ReleaseBuffer(deviceBuffer.buffer);
+            if (ret != OH_NN_SUCCESS) {
+                LOGE("Releasing device buffer failed after copying offline models to device buffers failed.");
+                return ret;
+            }
+        }
+
+        return status;
+    }
+
+    // Retrieve offline model configs from the Custom primitive and insert them into extensions.
+    std::string key;
+    std::vector<uint8_t> valueFromCustomPrimitive;
+    std::vector<int8_t> value;
+    std::map<std::string, std::vector<int8_t>> extensions;
+    std::vector<const mindspore::schema::Attribute*> attributes =
+        mindspore::lite::MindIR_Custom_GetAttr(model->all_nodes_[0]->primitive_);
+    for (const auto& attribute : attributes) {
+        key = mindspore::lite::MindIR_Attribute_GetName(*attribute);
+        valueFromCustomPrimitive = mindspore::lite::MindIR_Attribute_GetData(*attribute);
+        value.assign(valueFromCustomPrimitive.begin(), valueFromCustomPrimitive.end());
+        extensions.insert(std::pair<std::string, std::vector<int8_t>>(key, value));
+    }
+
+    status = PrepareOfflineModel(deviceBuffers, config, extensions, preparedModel);
+    if (status != OH_NN_SUCCESS) {
+        LOGE("PrepareOfflineModel failed.");
+        return status;
+    }
+
     return OH_NN_SUCCESS;
 }
 } // namespace NeuralNetworkRuntime
-} // namespace OHOS
\ No newline at end of file
+} // namespace OHOS
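The attribute loop above turns each attribute of the Custom primitive into one entry of the extensions map handed to the driver. The same conversion on plain STL types; keys and values here are made up, since real ones are vendor-defined:

    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    std::map<std::string, std::vector<int8_t>> ToExtensions(const std::map<std::string, std::string>& attrs)
    {
        std::map<std::string, std::vector<int8_t>> extensions;
        for (const auto& [key, text] : attrs) {
            // Re-encode each attribute payload as the int8 byte vector the HDI expects.
            extensions.emplace(key, std::vector<int8_t>(text.begin(), text.end()));
        }
        return extensions;
    }

    // e.g. ToExtensions({{"op_attr", "npu:fast"}}) produces one "op_attr" entry.
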
diff --git a/frameworks/native/hdi_device_v2_0.h b/frameworks/native/hdi_device_v2_0.h
index 4964d98844ce67a6db0703705b2ba73152be7f4a..8c493f05aff8291e6442413a3a10a85ca1fa4741 100644
--- a/frameworks/native/hdi_device_v2_0.h
+++ b/frameworks/native/hdi_device_v2_0.h
@@ -50,12 +50,25 @@ public:
     OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector<ModelBuffer>& modelCache,
                                                 const ModelConfig& config,
                                                 std::shared_ptr<PreparedModel>& preparedModel) override;
+    OH_NN_ReturnCode PrepareOfflineModel(std::shared_ptr<const mindspore::lite::LiteGraph> model,
+                                         const ModelConfig& config,
+                                         std::shared_ptr<PreparedModel>& preparedModel) override;
 
     void* AllocateBuffer(size_t length) override;
     OH_NN_ReturnCode ReleaseBuffer(const void* buffer) override;
 
 private:
     OH_NN_ReturnCode ReleaseSharedBuffer(const V2_0::SharedBuffer& buffer);
+    OH_NN_ReturnCode GetOfflineModelFromLiteGraph(std::shared_ptr<const mindspore::lite::LiteGraph> graph,
+                                                  std::vector<std::vector<uint8_t>>& offlineModels);
+    OH_NN_ReturnCode AllocateDeviceBufferForOfflineModel(const std::vector<std::vector<uint8_t>>& offlineModels,
+                                                         std::vector<ModelBuffer>& deviceBuffers);
+    OH_NN_ReturnCode CopyOfflineModelToDevice(const std::vector<std::vector<uint8_t>>& offlineModels,
+                                              std::vector<ModelBuffer>& deviceBuffers);
+    OH_NN_ReturnCode PrepareOfflineModel(std::vector<ModelBuffer>& deviceBuffers,
+                                         const ModelConfig& config,
+                                         const std::map<std::string, std::vector<int8_t>> extensions,
+                                         std::shared_ptr<PreparedModel>& preparedModel);
 
 private:
     // first: major version, second: minor version
@@ -64,4 +77,4 @@ private:
     std::pair<uint32_t, uint32_t> m_hdiVersion;
     OHOS::sptr<V2_0::INnrtDevice> m_iDevice {nullptr};
 };
 } // namespace NeuralNetworkRuntime
 } // namespace OHOS
-#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V2_0_H
\ No newline at end of file
+#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V2_0_H
diff --git a/frameworks/native/hdi_prepared_model_v2_0.cpp b/frameworks/native/hdi_prepared_model_v2_0.cpp
index 0bf9fdfb741aea4aa3973e278c9b6855ed4917c6..37777f97f39d7890d64271a2b09a37941621f1c2 100644
--- a/frameworks/native/hdi_prepared_model_v2_0.cpp
+++ b/frameworks/native/hdi_prepared_model_v2_0.cpp
@@ -16,6 +16,7 @@
 #include "hdi_prepared_model_v2_0.h"
 
 #include "common/log.h"
+#include "hdi_returncode_utils.h"
 #include "memory_manager.h"
 
 namespace OHOS {
@@ -102,15 +103,14 @@ HDIPreparedModelV2_0::HDIPreparedModelV2_0(OHOS::sptr<V2_0::IPreparedModel> hdiP
 OH_NN_ReturnCode HDIPreparedModelV2_0::ExportModelCache(std::vector<ModelBuffer>& modelCache)
 {
     if (!modelCache.empty()) {
-        LOGE("The vector of modelCache should be empty. size=%zu", modelCache.size());
+        LOGE("The vector of modelCache should be empty. size=%{public}zu", modelCache.size());
         return OH_NN_INVALID_PARAMETER;
     }
 
     std::vector<V2_0::SharedBuffer> iBuffers;
     auto ret = m_hdiPreparedModel->ExportModelCache(iBuffers);
-    if (ret != HDF_SUCCESS) {
-        LOGE("Export model cache failed. ErrorCode=%d", ret);
-        return OH_NN_UNAVALIDABLE_DEVICE;
+    if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
+        return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Export model cache failed");
     }
 
     auto memManager = MemoryManager::GetInstance();
@@ -118,7 +118,7 @@ OH_NN_ReturnCode HDIPreparedModelV2_0::ExportModelCache(std::vector<ModelBuffer>
     for (size_t i = 0; i < iBuffersSize; i++) {
         auto addr = memManager->MapMemory(iBuffers[i].fd, iBuffers[i].bufferSize);
         if (addr == nullptr) {
-            LOGE("Export the %zuth model cache failed, cannot not map fd to address.", i + 1);
+            LOGE("Export the %{public}zuth model cache failed, cannot map fd to address.", i + 1);
             return OH_NN_MEMORY_ERROR;
         }
         ModelBuffer modelbuffer {addr, iBuffers[i].bufferSize};
@@ -152,9 +152,12 @@ OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector<IOTensor>& inputs,
         iOutputTensors.emplace_back(iTensor);
     }
 
-    auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims, isOutputBufferEnough);
-    if (ret != HDF_SUCCESS || outputsDims.empty()) {
-        LOGE("Run model failed. ErrorCode=%d", ret);
+    auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims);
+    if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
+        return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Run model failed");
+    }
+    if (outputsDims.empty()) {
+        LOGE("Run failed, outputsDims is empty.");
         return OH_NN_UNAVALIDABLE_DEVICE;
     }
 
@@ -165,12 +168,11 @@ OH_NN_ReturnCode HDIPreparedModelV2_0::GetInputDimRanges(std::vector<std::vector
     std::vector<std::vector<uint32_t>>& maxInputDims)
 {
     auto ret = m_hdiPreparedModel->GetInputDimRanges(minInputDims, maxInputDims);
-    if (ret != HDF_SUCCESS) {
-        LOGE("GetInputDimRanges failed. ErrorCode=%d", ret);
-        return OH_NN_UNAVALIDABLE_DEVICE;
+    if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
+        return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Get input dim ranges failed");
     }
     return OH_NN_SUCCESS;
 }
 } // namespace NeuralNetworkRuntime
-} // OHOS
\ No newline at end of file
+} // OHOS
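With isOutputBufferEnough dropped from the V2.0 Run() signature, callers learn about output shapes only through outputsDims, so the empty-vector check above is the last line of defence. A hedged sketch of the kind of post-run validation a caller might add:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Returns false when the device reported a shape count that does not match
    // the number of output tensors the caller prepared.
    bool ValidateOutputDims(const std::vector<std::vector<int32_t>>& outputsDims, size_t outputCount)
    {
        if (outputsDims.size() != outputCount) {
            std::printf("expected %zu output shapes, got %zu\n", outputCount, outputsDims.size());
            return false;
        }
        return true;
    }
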
ErrorCode=%d", ret); - return OH_NN_UNAVALIDABLE_DEVICE; + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Export model cache failed"); } auto memManager = MemoryManager::GetInstance(); @@ -118,7 +118,7 @@ OH_NN_ReturnCode HDIPreparedModelV2_0::ExportModelCache(std::vector for (size_t i = 0; i < iBuffersSize; i++) { auto addr = memManager->MapMemory(iBuffers[i].fd, iBuffers[i].bufferSize); if (addr == nullptr) { - LOGE("Export the %zuth model cache failed, cannot not map fd to address.", i + 1); + LOGE("Export the %{public}zuth model cache failed, cannot not map fd to address.", i + 1); return OH_NN_MEMORY_ERROR; } ModelBuffer modelbuffer {addr, iBuffers[i].bufferSize}; @@ -152,9 +152,12 @@ OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector& inputs, iOutputTensors.emplace_back(iTensor); } - auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims, isOutputBufferEnough); - if (ret != HDF_SUCCESS || outputsDims.empty()) { - LOGE("Run model failed. ErrorCode=%d", ret); + auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Run model failed"); + } + if (outputsDims.empty()) { + LOGE("Run failed, outputsDims is empty."); return OH_NN_UNAVALIDABLE_DEVICE; } @@ -165,12 +168,11 @@ OH_NN_ReturnCode HDIPreparedModelV2_0::GetInputDimRanges(std::vector>& maxInputDims) { auto ret = m_hdiPreparedModel->GetInputDimRanges(minInputDims, maxInputDims); - if (ret != HDF_SUCCESS) { - LOGE("GetInputDimRanges failed. ErrorCode=%d", ret); - return OH_NN_UNAVALIDABLE_DEVICE; + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Get input dim ranges failed"); } return OH_NN_SUCCESS; } } // namespace NeuralNetworkRuntime -} // OHOS \ No newline at end of file +} // OHOS diff --git a/frameworks/native/hdi_prepared_model_v2_0.h b/frameworks/native/hdi_prepared_model_v2_0.h index ad42dcbcb314c56727b8f641132fcddc23e2bb64..5b3e67ad98a78772026fb974f47488712bf4e18e 100644 --- a/frameworks/native/hdi_prepared_model_v2_0.h +++ b/frameworks/native/hdi_prepared_model_v2_0.h @@ -19,13 +19,13 @@ #include -#include #include #include +#include -#include "refbase.h" -#include "prepared_model.h" #include "cpp_type.h" +#include "prepared_model.h" +#include "refbase.h" namespace V2_0 = OHOS::HDI::Nnrt::V2_0; @@ -52,4 +52,4 @@ private: }; } // namespace NeuralNetworkRuntime } // OHOS -#endif // NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V2_0_H \ No newline at end of file +#endif // NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V2_0_H diff --git a/frameworks/native/hdi_returncode_utils.h b/frameworks/native/hdi_returncode_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..7d82e967b094719f06e0f0b1670672f7c5ce825a --- /dev/null +++ b/frameworks/native/hdi_returncode_utils.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#ifndef NEURAL_NETWORK_RUNTIME_HDI_RETURNCODE_UTILS_H +#define NEURAL_NETWORK_RUNTIME_HDI_RETURNCODE_UTILS_H + +#include +#include +#include +#include "common/log.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +inline std::string ConverterRetToString(OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode returnCode) +{ + static std::unordered_map nnrtRet2StringMap { + {V2_0::NNRT_ReturnCode::NNRT_SUCCESS, "NNRT_SUCCESS"}, + {V2_0::NNRT_ReturnCode::NNRT_FAILED, "NNRT_FAILED"}, + {V2_0::NNRT_ReturnCode::NNRT_NULL_PTR, "NNRT_NULL_PTR"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_PARAMETER, "NNRT_INVALID_PARAMETER"}, + {V2_0::NNRT_ReturnCode::NNRT_MEMORY_ERROR, "NNRT_MEMORY_ERROR"}, + {V2_0::NNRT_ReturnCode::NNRT_OUT_OF_MEMORY, "NNRT_OUT_OF_MEMORY"}, + {V2_0::NNRT_ReturnCode::NNRT_OPERATION_FORBIDDEN, "NNRT_OPERATION_FORBIDDEN"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_FILE, "NNRT_INVALID_FILE"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_PATH, "NNRT_INVALID_PATH"}, + {V2_0::NNRT_ReturnCode::NNRT_INSUFFICIENT_BUFFER, "NNRT_INSUFFICIENT_BUFFER"}, + {V2_0::NNRT_ReturnCode::NNRT_NO_CHANGE, "NNRT_NO_CHANGE"}, + {V2_0::NNRT_ReturnCode::NNRT_NOT_SUPPORT, "NNRT_NOT_SUPPORT"}, + {V2_0::NNRT_ReturnCode::NNRT_SERVICE_ERROR, "NNRT_SERVICE_ERROR"}, + {V2_0::NNRT_ReturnCode::NNRT_DEVICE_ERROR, "NNRT_DEVICE_ERROR"}, + {V2_0::NNRT_ReturnCode::NNRT_DEVICE_BUSY, "NNRT_DEVICE_BUSY"}, + {V2_0::NNRT_ReturnCode::NNRT_CANCELLED, "NNRT_CANCELLED"}, + {V2_0::NNRT_ReturnCode::NNRT_PERMISSION_DENIED, "NNRT_PERMISSION_DENIED"}, + {V2_0::NNRT_ReturnCode::NNRT_TIME_OUT, "NNRT_TIME_OUT"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_TENSOR, "NNRT_INVALID_TENSOR"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_NODE, "NNRT_INVALID_NODE"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_INPUT, "NNRT_INVALID_INPUT"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_OUTPUT, "NNRT_INVALID_OUTPUT"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_DATATYPE, "NNRT_INVALID_DATATYPE"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_FORMAT, "NNRT_INVALID_FORMAT"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_TENSOR_NAME, "NNRT_INVALID_TENSOR_NAME"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_SHAPE, "NNRT_INVALID_SHAPE"}, + {V2_0::NNRT_ReturnCode::NNRT_OUT_OF_DIMENTION_RANGES, "NNRT_OUT_OF_DIMENTION_RANGES"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_BUFFER, "NNRT_INVALID_BUFFER"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_BUFFER_SIZE, "NNRT_INVALID_BUFFER_SIZE"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_PERFORMANCE_MODE, "NNRT_INVALID_PERFORMANCE_MODE"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_PRIORITY, "NNRT_INVALID_PRIORITY"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_MODEL, "NNRT_INVALID_MODEL"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_MODEL_CACHE, "NNRT_INVALID_MODEL_CACHE"}, + {V2_0::NNRT_ReturnCode::NNRT_UNSUPPORTED_OP, "NNRT_UNSUPPORTED_OP"} + }; + + if (nnrtRet2StringMap.find(returnCode) == nnrtRet2StringMap.end()) { + return "ConverterRetToString failed, returnCode is invalid."; + } + + return nnrtRet2StringMap.at(returnCode); +} + +template +T CheckReturnCode(int32_t ret, T funcRet, const std::string& errorInfo) +{ + if (ret < V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + LOGE("%{public}s. An error occurred in HDI, errorcode is %{public}d.", errorInfo.c_str(), ret); + } else if (ret > V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast(ret); + LOGE("%{public}s. 
Errorcode is %{public}s.", errorInfo.c_str(), ConverterRetToString(nnrtRet).c_str()); + } + + return funcRet; +} +} // namespace NeuralNetworkRuntime +} // OHOS +#endif // NEURAL_NETWORK_RUNTIME_HDI_RETURNCODE_UTILS_H \ No newline at end of file