From 8b64b887e3dc845dd87a9e656344013f7f5803fd Mon Sep 17 00:00:00 2001 From: w30052974 Date: Tue, 24 Dec 2024 15:13:45 +0800 Subject: [PATCH] 5.0.2 cache number limited Signed-off-by: w30052974 --- .../neural_network_core/backend_manager.cpp | 17 +++++++++++++++++ .../neural_network_core/backend_manager.h | 17 +---------------- .../neural_network_runtime.cpp | 13 +++++++++++++ .../neural_network_runtime/nncompiled_cache.cpp | 6 +++++- .../neural_network_runtime/nncompiled_cache.h | 1 + .../neural_network_runtime/nncompiler.cpp | 6 ++++++ 6 files changed, 43 insertions(+), 17 deletions(-) diff --git a/frameworks/native/neural_network_core/backend_manager.cpp b/frameworks/native/neural_network_core/backend_manager.cpp index c0b543d..b1920d4 100644 --- a/frameworks/native/neural_network_core/backend_manager.cpp +++ b/frameworks/native/neural_network_core/backend_manager.cpp @@ -28,6 +28,23 @@ BackendManager::~BackendManager() m_backendIDGroup.clear(); } +BackendManager& BackendManager::GetInstance() +{ + // if libneural_network_runtime.so loaded + if (dlopen("libneural_network_runtime.so", RTLD_NOLOAD) != nullptr) { + // if libneural_network_runtime_ext.so not loaded, try to dlopen it + if (dlopen("libneural_network_runtime_ext.so", RTLD_NOLOAD) == nullptr) { + LOGI("dlopen libneural_network_runtime_ext.so."); + void* libHandle = dlopen("libneural_network_runtime_ext.so", RTLD_NOW | RTLD_GLOBAL); + if (libHandle == nullptr) { + LOGW("Failed to dlopen libneural_network_runtime_ext.so."); + } + } + } + static BackendManager instance; + return instance; +} + const std::vector& BackendManager::GetAllBackendsID() { const std::lock_guard lock(m_mtx); diff --git a/frameworks/native/neural_network_core/backend_manager.h b/frameworks/native/neural_network_core/backend_manager.h index 118502c..954fab5 100644 --- a/frameworks/native/neural_network_core/backend_manager.h +++ b/frameworks/native/neural_network_core/backend_manager.h @@ -41,22 +41,7 @@ public: const std::string& 
backendName, std::function()> creator); void RemoveBackend(const std::string& backendName); - static BackendManager& GetInstance() - { - // if libneural_network_runtime.so loaded - if (dlopen("libneural_network_runtime.so", RTLD_NOLOAD) != nullptr) { - // if libneural_network_runtime_ext.so not loaded, try to dlopen it - if (dlopen("libneural_network_runtime_ext.so", RTLD_NOLOAD) == nullptr) { - LOGI("dlopen libneural_network_runtime_ext.so."); - void* libHandle = dlopen("libneural_network_runtime_ext.so", RTLD_NOW | RTLD_GLOBAL); - if (libHandle == nullptr) { - LOGW("Failed to dlopen libneural_network_runtime_ext.so."); - } - } - } - static BackendManager instance; - return instance; - } + static BackendManager& GetInstance(); private: BackendManager() = default; diff --git a/frameworks/native/neural_network_runtime/neural_network_runtime.cpp b/frameworks/native/neural_network_runtime/neural_network_runtime.cpp index 4ab5d6a..9320227 100644 --- a/frameworks/native/neural_network_runtime/neural_network_runtime.cpp +++ b/frameworks/native/neural_network_runtime/neural_network_runtime.cpp @@ -27,6 +27,7 @@ #include #include +#include #include #include @@ -46,6 +47,7 @@ const std::string NULL_HARDWARE_NAME = "default"; const std::string HARDWARE_NAME = "const.ai.nnrt_deivce"; const std::string HARDWARE_VERSION = "v5_0"; constexpr size_t HARDWARE_NAME_MAX_LENGTH = 128; +constexpr size_t FILE_NUMBER_MAX = 100; // 限制cache文件数量最大为100 NNRT_API NN_QuantParam *OH_NNQuantParam_Create() { @@ -594,11 +596,22 @@ NNRT_API bool OH_NNModel_HasCache(const char *cacheDir, const char *modelName, u return false; } + if (fileNumber <= 0 || fileNumber > FILE_NUMBER_MAX) { + LOGE("OH_NNModel_HasCache fileNumber is invalid or more than 100"); + std::filesystem::remove_all(cacheInfoPath); + return false; + } + // determine whether cache model files exist for (int64_t i = 0; i < fileNumber; ++i) { std::string cacheModelPath = std::string(cacheDir) + "/" + std::string(modelName) + 
std::to_string(i) + ".nncache"; exist = (exist && (stat(cacheModelPath.c_str(), &buffer) == 0)); + if (!exist) { + LOGE("OH_NNModel_HasCache cacheModelPath is not existed."); + std::filesystem::remove_all(cacheInfoPath); + return false; + } } if (cacheVersion != version) { diff --git a/frameworks/native/neural_network_runtime/nncompiled_cache.cpp b/frameworks/native/neural_network_runtime/nncompiled_cache.cpp index d4f3ba9..6c0b91a 100644 --- a/frameworks/native/neural_network_runtime/nncompiled_cache.cpp +++ b/frameworks/native/neural_network_runtime/nncompiled_cache.cpp @@ -26,7 +26,7 @@ namespace OHOS { namespace NeuralNetworkRuntime { -constexpr int32_t MAX_MODEL_SIZE = 200 * 1024 * 1024; // 200MB +constexpr int32_t MAX_MODEL_SIZE = 500 * 1024 * 1024; // 500MB constexpr int32_t NULL_PTR_LENGTH = 0; constexpr int32_t NUMBER_CACHE_INFO_MEMBERS = 3; constexpr int32_t HEX_UNIT = 16; @@ -194,6 +194,10 @@ OH_NN_ReturnCode NNCompiledCache::GenerateCacheModel(const std::vector NN_CACHE_FILE_NUMBER_MAX) { + LOGE("[NNCompiledCache] Caches size is equal 0 or greater than 100."); + return OH_NN_FAILED; + } auto cacheInfoPtr = cacheInfo.get(); *cacheInfoPtr++ = static_cast(cacheNumber); diff --git a/frameworks/native/neural_network_runtime/nncompiled_cache.h b/frameworks/native/neural_network_runtime/nncompiled_cache.h index 45897e9..c27e52e 100644 --- a/frameworks/native/neural_network_runtime/nncompiled_cache.h +++ b/frameworks/native/neural_network_runtime/nncompiled_cache.h @@ -27,6 +27,7 @@ namespace OHOS { namespace NeuralNetworkRuntime { const uint32_t INVALID_CAHCE_VERSION = UINT32_MAX; // UINT32_MAX is reserved for invalid cache version. 
+constexpr size_t NN_CACHE_FILE_NUMBER_MAX = 100; // limit the maximum number of cache files to 100 struct NNCompiledCacheInfo { int64_t fileNumber{0}; diff --git a/frameworks/native/neural_network_runtime/nncompiler.cpp b/frameworks/native/neural_network_runtime/nncompiler.cpp index 3fa25fb..a681294 100644 --- a/frameworks/native/neural_network_runtime/nncompiler.cpp +++ b/frameworks/native/neural_network_runtime/nncompiler.cpp @@ -531,6 +531,12 @@ OH_NN_ReturnCode NNCompiler::SaveToCacheFile() const return ret; } + size_t cacheNumber = caches.size(); + if (cacheNumber == 0 || cacheNumber > NN_CACHE_FILE_NUMBER_MAX) { + LOGE("[NNCompiler] Caches size is equal 0 or greater than 100."); + return OH_NN_FAILED; + } + NNCompiledCache compiledCache; ret = compiledCache.SetBackend(m_backendID); if (ret != OH_NN_SUCCESS) { -- Gitee