From f73099586720064d76fc9e822798568be03dc57c Mon Sep 17 00:00:00 2001
From: zhuyinlin <1085905529@qq.com>
Date: Thu, 3 Jul 2025 17:28:59 +0800
Subject: [PATCH] check cache path

Signed-off-by: zhuyinlin <1085905529@qq.com>
---
 .../neural_network_core.cpp                   | 196 ++++++++++--------
 .../nncompiled_cache.cpp                      |   6 +-
 .../neural_network_runtime/nncompiler.cpp     |   9 +-
 .../nn_compiled_cache_test.cpp                |   2 +-
 4 files changed, 121 insertions(+), 92 deletions(-)

diff --git a/frameworks/native/neural_network_core/neural_network_core.cpp b/frameworks/native/neural_network_core/neural_network_core.cpp
index b55a08b..fcaa18a 100644
--- a/frameworks/native/neural_network_core/neural_network_core.cpp
+++ b/frameworks/native/neural_network_core/neural_network_core.cpp
@@ -17,6 +17,7 @@
 #include
 #include
+#include <sys/stat.h>
 #include
 #include
 #include
@@ -33,6 +34,112 @@ using namespace OHOS::NeuralNetworkRuntime;
 #define NNRT_API __attribute__((visibility("default")))
 const size_t INPUT_OUTPUT_MAX_INDICES = 200;
 
+namespace {
+OH_NN_ReturnCode GetNnrtModelId(Compilation* compilationImpl, NNRtServiceApi& nnrtService)
+{
+    std::string modelName;
+    compilationImpl->compiler->GetModelName(modelName);
+    if (compilationImpl->cachePath != nullptr) {
+        struct stat buffer;
+        if (stat(compilationImpl->cachePath, &buffer) != 0) {
+            LOGE("GetModelId failed, cachePath does not exist or is not accessible.");
+            return OH_NN_INVALID_PARAMETER;
+        }
+    }
+
+    if (compilationImpl->nnModel != nullptr) {
+        compilationImpl->nnrtModelID = nnrtService.GetNNRtModelIDFromCache(compilationImpl->cachePath,
+            modelName.c_str());
+        if (compilationImpl->nnrtModelID == 1) {
+            compilationImpl->nnrtModelID = nnrtService.GetNNRtModelIDFromModel(compilationImpl->nnModel);
+        }
+    } else if (compilationImpl->offlineModelPath != nullptr) {
+        compilationImpl->nnrtModelID = nnrtService.GetNNRtModelIDFromPath(compilationImpl->offlineModelPath);
+    } else if (compilationImpl->cachePath != nullptr) {
+        compilationImpl->nnrtModelID =
+            nnrtService.GetNNRtModelIDFromCache(compilationImpl->cachePath, modelName.c_str());
+    } else if ((compilationImpl->offlineModelBuffer.first != nullptr) && \
+        (compilationImpl->offlineModelBuffer.second != size_t(0))) {
+        compilationImpl->nnrtModelID = nnrtService.GetNNRtModelIDFromBuffer(
+            compilationImpl->offlineModelBuffer.first, compilationImpl->offlineModelBuffer.second);
+    } else if ((compilationImpl->cacheBuffer.first != nullptr) && \
+        (compilationImpl->cacheBuffer.second != size_t(0))) {
+        compilationImpl->nnrtModelID = nnrtService.GetNNRtModelIDFromBuffer(
+            compilationImpl->cacheBuffer.first, compilationImpl->cacheBuffer.second);
+    } else {
+        LOGE("GetModelId failed, no available model to set modelId, please check.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode IsCompilationAvaliable(Compilation* compilationImpl)
+{
+    if (compilationImpl == nullptr) {
+        LOGE("IsCompilationAvaliable failed, compilation implementation is nullptr.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (((compilationImpl->nnModel != nullptr) && (compilationImpl->offlineModelPath != nullptr)) ||
+        ((compilationImpl->nnModel != nullptr) &&
+        ((compilationImpl->offlineModelBuffer.first != nullptr) ||
+        (compilationImpl->offlineModelBuffer.second != static_cast<size_t>(0)))) ||
+        ((compilationImpl->offlineModelPath != nullptr) &&
+        ((compilationImpl->offlineModelBuffer.first != nullptr) ||
+        (compilationImpl->offlineModelBuffer.second != static_cast<size_t>(0))))) {
+        LOGE("IsCompilationAvaliable failed, find multi model to build compilation.");
compilation."); + return OH_NN_INVALID_PARAMETER; + } + + if (compilationImpl->compiler != nullptr) { + LOGE("IsCompilationAvaliable failed, the compiler in compilation is not nullptr, " + "please input a new compilation."); + return OH_NN_INVALID_PARAMETER; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode CheckModelSize(const Compilation* compilation, NNRtServiceApi& nnrtService, bool& isExceedRamLimit) +{ + int ret = static_cast(OH_NN_SUCCESS); + if (compilation->nnModel != nullptr) { + ret = nnrtService.CheckModelSizeFromModel(compilation->nnModel, isExceedRamLimit); + } else if (compilation->offlineModelPath != nullptr) { + ret = nnrtService.CheckModelSizeFromPath(compilation->offlineModelPath, isExceedRamLimit); + } else if (compilation->cachePath != nullptr) { + struct stat buffer; + if (stat(compilation->cachePath, &buffer) != 0) { + LOGE("CheckExceedRamLimit failed, cachePath is not exit or permission."); + return OH_NN_INVALID_PARAMETER; + } + + std::string modelName; + compilation->compiler->GetModelName(modelName); + ret = nnrtService.CheckModelSizeFromCache(compilation->cachePath, modelName, isExceedRamLimit); + } else if ((compilation->offlineModelBuffer.first != nullptr) && \ + (compilation->offlineModelBuffer.second != size_t(0))) { + ret = nnrtService.CheckModelSizeFromBuffer( + compilation->offlineModelBuffer.first, compilation->offlineModelBuffer.second, isExceedRamLimit); + } else if ((compilation->cacheBuffer.first != nullptr) && \ + (compilation->cacheBuffer.second != size_t(0))) { + ret = nnrtService.CheckModelSizeFromBuffer( + compilation->cacheBuffer.first, compilation->cacheBuffer.second, isExceedRamLimit); + } else { + LOGE("CheckExceedRamLimit failed, no available model to check."); + return OH_NN_INVALID_PARAMETER; + } + + if (ret != static_cast(OH_NN_SUCCESS)) { + LOGE("CheckExceedRamLimit failed, some error happened when check if model exceed ram limit."); + return OH_NN_INVALID_PARAMETER; + } + + return OH_NN_SUCCESS; +} +} + NNRT_API OH_NN_ReturnCode OH_NNDevice_GetAllDevicesID(const size_t **allDevicesID, uint32_t *deviceCount) { if (allDevicesID == nullptr) { @@ -505,33 +612,11 @@ OH_NN_ReturnCode CheckExceedRamLimit(const Compilation* compilation, bool& isExc return OH_NN_INVALID_PARAMETER; } - int ret = static_cast(OH_NN_SUCCESS); - if (compilation->nnModel != nullptr) { - ret = nnrtService.CheckModelSizeFromModel(compilation->nnModel, isExceedRamLimit); - } else if (compilation->offlineModelPath != nullptr) { - ret = nnrtService.CheckModelSizeFromPath(compilation->offlineModelPath, isExceedRamLimit); - } else if (compilation->cachePath != nullptr) { - std::string modelName; - compilation->compiler->GetModelName(modelName); - ret = nnrtService.CheckModelSizeFromCache(compilation->cachePath, modelName, isExceedRamLimit); - } else if ((compilation->offlineModelBuffer.first != nullptr) && \ - (compilation->offlineModelBuffer.second != size_t(0))) { - ret = nnrtService.CheckModelSizeFromBuffer( - compilation->offlineModelBuffer.first, compilation->offlineModelBuffer.second, isExceedRamLimit); - } else if ((compilation->cacheBuffer.first != nullptr) && \ - (compilation->cacheBuffer.second != size_t(0))) { - ret = nnrtService.CheckModelSizeFromBuffer( - compilation->cacheBuffer.first, compilation->cacheBuffer.second, isExceedRamLimit); - } else { - LOGE("CheckExceedRamLimit failed, no available model to check."); - return OH_NN_INVALID_PARAMETER; - } - - if (ret != static_cast(OH_NN_SUCCESS)) { - LOGE("CheckExceedRamLimit failed, some error happened when 
+    OH_NN_ReturnCode ret = CheckModelSize(compilation, nnrtService, isExceedRamLimit);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("CheckExceedRamLimit failed, failed to check model size.");
         return OH_NN_INVALID_PARAMETER;
     }
-
     return OH_NN_SUCCESS;
 }
 
@@ -611,67 +696,6 @@ OH_NN_ReturnCode Authentication(Compilation** compilation, bool &isExceedRamLimi
     return OH_NN_SUCCESS;
 }
 
-namespace {
-OH_NN_ReturnCode GetNnrtModelId(Compilation* compilationImpl, NNRtServiceApi& nnrtService)
-{
-    std::string modelName;
-    compilationImpl->compiler->GetModelName(modelName);
-
-    if (compilationImpl->nnModel != nullptr) {
-        compilationImpl->nnrtModelID = nnrtService.GetNNRtModelIDFromCache(compilationImpl->cachePath,
-            modelName.c_str());
-        if (compilationImpl->nnrtModelID == 0) {
-            compilationImpl->nnrtModelID = nnrtService.GetNNRtModelIDFromModel(compilationImpl->nnModel);
-        }
-    } else if (compilationImpl->offlineModelPath != nullptr) {
-        compilationImpl->nnrtModelID = nnrtService.GetNNRtModelIDFromPath(compilationImpl->offlineModelPath);
-    } else if (compilationImpl->cachePath != nullptr) {
-        compilationImpl->nnrtModelID =
-            nnrtService.GetNNRtModelIDFromCache(compilationImpl->cachePath, modelName.c_str());
-    } else if ((compilationImpl->offlineModelBuffer.first != nullptr) && \
-        (compilationImpl->offlineModelBuffer.second != size_t(0))) {
-        compilationImpl->nnrtModelID = nnrtService.GetNNRtModelIDFromBuffer(
-            compilationImpl->offlineModelBuffer.first, compilationImpl->offlineModelBuffer.second);
-    } else if ((compilationImpl->cacheBuffer.first != nullptr) && \
-        (compilationImpl->cacheBuffer.second != size_t(0))) {
-        compilationImpl->nnrtModelID = nnrtService.GetNNRtModelIDFromBuffer(
-            compilationImpl->cacheBuffer.first, compilationImpl->cacheBuffer.second);
-    } else {
-        LOGE("GetModelId failed, no available model to set modelId, please check.");
-        return OH_NN_INVALID_PARAMETER;
-    }
-
-    return OH_NN_SUCCESS;
-}
-
-OH_NN_ReturnCode IsCompilationAvaliable(Compilation* compilationImpl)
-{
-    if (compilationImpl == nullptr) {
-        LOGE("IsCompilationAvaliable failed, compilation implementation is nullptr.");
-        return OH_NN_INVALID_PARAMETER;
-    }
-
-    if (((compilationImpl->nnModel != nullptr) && (compilationImpl->offlineModelPath != nullptr)) ||
-        ((compilationImpl->nnModel != nullptr) &&
-        ((compilationImpl->offlineModelBuffer.first != nullptr) ||
-        (compilationImpl->offlineModelBuffer.second != static_cast<size_t>(0)))) ||
-        ((compilationImpl->offlineModelPath != nullptr) &&
-        ((compilationImpl->offlineModelBuffer.first != nullptr) ||
-        (compilationImpl->offlineModelBuffer.second != static_cast<size_t>(0))))) {
-        LOGE("IsCompilationAvaliable failed, find multi model to build compilation.");
-        return OH_NN_INVALID_PARAMETER;
-    }
-
-    if (compilationImpl->compiler != nullptr) {
-        LOGE("IsCompilationAvaliable failed, the compiler in compilation is not nullptr, "
-            "please input a new compilation.");
-        return OH_NN_INVALID_PARAMETER;
-    }
-
-    return OH_NN_SUCCESS;
-}
-}
-
 OH_NN_ReturnCode GetModelId(Compilation** compilation)
 {
     if (compilation == nullptr) {
diff --git a/frameworks/native/neural_network_runtime/nncompiled_cache.cpp b/frameworks/native/neural_network_runtime/nncompiled_cache.cpp
index effcab5..df34f45 100644
--- a/frameworks/native/neural_network_runtime/nncompiled_cache.cpp
+++ b/frameworks/native/neural_network_runtime/nncompiled_cache.cpp
@@ -109,7 +109,7 @@ OH_NN_ReturnCode NNCompiledCache::Restore(const std::string& cacheDir,
 
     if (static_cast<int64_t>(version) > cacheInfo.version) {
LOGE("[NNCompiledCache] Restore failed, version is not match."); - return OH_NN_INVALID_PARAMETER; + return OH_NN_INVALID_FILE; } if (static_cast(version) < cacheInfo.version) { @@ -124,7 +124,7 @@ OH_NN_ReturnCode NNCompiledCache::Restore(const std::string& cacheDir, ret = ReadCacheModelFile(cacheModelPath, modelBuffer); if (ret != OH_NN_SUCCESS) { LOGE("[NNCompiledCache] Restore failed, error happened when calling ReadCacheModelFile."); - return ret; + return OH_NN_INVALID_FILE; } if (GetCrc16(static_cast(modelBuffer.data), modelBuffer.length) != @@ -343,7 +343,7 @@ OH_NN_ReturnCode NNCompiledCache::CheckCacheInfo(NNCompiledCacheInfo& modelCache LOGE("[NNCompiledCache] CheckCacheInfo failed. The deviceId in the cache files " "is different from current deviceId," "please change the cache directory or current deviceId."); - return OH_NN_INVALID_PARAMETER; + return OH_NN_INVALID_FILE; } if (j["data"].find("fileNumber") == j["data"].end()) { diff --git a/frameworks/native/neural_network_runtime/nncompiler.cpp b/frameworks/native/neural_network_runtime/nncompiler.cpp index 0b66abf..4a4aad7 100644 --- a/frameworks/native/neural_network_runtime/nncompiler.cpp +++ b/frameworks/native/neural_network_runtime/nncompiler.cpp @@ -453,12 +453,17 @@ OH_NN_ReturnCode NNCompiler::OnlineBuild() { // cache存在,从cache直接复原prepareModel、input/output TensorDesc OH_NN_ReturnCode ret = RestoreFromCacheFile(); - if (ret != OH_NN_SUCCESS) { + if (ret == OH_NN_INVALID_FILE) { char path[PATH_MAX]; - realpath(m_cachePath.c_str(), path); + if (realpath(m_cachePath.c_str(), path) == nullptr) { + LOGE("[NNCompiler] Build failed, fail to get the real path of cacheDir."); + return OH_NN_INVALID_PARAMETER; + } + std::string cachePath = path; std::string cacheInfo = cachePath + "/" + m_extensionConfig.modelName + "cache_info.nncache"; if (std::filesystem::exists(cacheInfo)) { + LOGW("[NNCompiler] cache file is failed, fail to delete cache file."); std::filesystem::remove_all(cacheInfo); } } diff --git a/test/unittest/components/nn_compiled_cache/nn_compiled_cache_test.cpp b/test/unittest/components/nn_compiled_cache/nn_compiled_cache_test.cpp index 1f6be0f..dff82c5 100644 --- a/test/unittest/components/nn_compiled_cache/nn_compiled_cache_test.cpp +++ b/test/unittest/components/nn_compiled_cache/nn_compiled_cache_test.cpp @@ -551,7 +551,7 @@ HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_checkcacheinfo_003, TestSize.L std::string cacheInfoPath = "/data/data/testcache_info.nncache"; OH_NN_ReturnCode ret = nncompiledCache.CheckCacheInfo(modelCacheInfo, cacheInfoPath); - EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); } /** -- Gitee