From 9cb57ac93f20e4a7da5fb5b77ba942ca9ae85a6d Mon Sep 17 00:00:00 2001
From: zhuyinlin <1085905529@qq.com>
Date: Wed, 25 Jun 2025 11:09:03 +0800
Subject: [PATCH 1/2] =?UTF-8?q?=E6=97=A5=E5=BF=97=E6=95=B4=E6=94=B9?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: zhuyinlin <1085905529@qq.com>
---
 .../native/neural_network_core/backend_manager.cpp |  3 ---
 .../neural_network_core/neural_network_core.cpp    |  7 -------
 .../neural_network_runtime/nncompiled_cache.cpp    |  3 ---
 .../native/neural_network_runtime/nncompiler.cpp   | 13 -------------
 4 files changed, 26 deletions(-)

diff --git a/frameworks/native/neural_network_core/backend_manager.cpp b/frameworks/native/neural_network_core/backend_manager.cpp
index 699df8d..b1c6f11 100644
--- a/frameworks/native/neural_network_core/backend_manager.cpp
+++ b/frameworks/native/neural_network_core/backend_manager.cpp
@@ -34,7 +34,6 @@ BackendManager& BackendManager::GetInstance()
     if (dlopen("libneural_network_runtime.so", RTLD_NOLOAD) != nullptr) {
         // if libneural_network_runtime_ext.so not loaded, try to dlopen it
         if (dlopen("libneural_network_runtime_ext.so", RTLD_NOLOAD) == nullptr) {
-            LOGI("dlopen libneural_network_runtime_ext.so.");
             void* libHandle = dlopen("libneural_network_runtime_ext.so", RTLD_NOW | RTLD_GLOBAL);
             if (libHandle == nullptr) {
                 LOGW("Failed to dlopen libneural_network_runtime_ext.so.");
@@ -142,7 +141,6 @@ OH_NN_ReturnCode BackendManager::RegisterBackend(
 
 void BackendManager::RemoveBackend(const std::string& backendName)
 {
-    LOGI("[RemoveBackend] start remove backend for %{public}s.", backendName.c_str());
     const std::lock_guard lock(m_mtx);
     if (m_backendIDGroup.find(backendName) == m_backendIDGroup.end()) {
         LOGI("[RemoveBackend] No need to remove backend for %{public}s.", backendName.c_str());
@@ -161,7 +159,6 @@ void BackendManager::RemoveBackend(const std::string& backendName)
         if (m_backendNames.find(backendID) != m_backendNames.end()) {
             m_backendNames.erase(backendID);
         }
-        LOGI("[RemoveBackend] remove backendID[%{public}zu] for %{public}s success.", backendID, backendName.c_str());
     }
     m_backendIDGroup.erase(backendName);
 }
diff --git a/frameworks/native/neural_network_core/neural_network_core.cpp b/frameworks/native/neural_network_core/neural_network_core.cpp
index e5cc6b9..0b52796 100644
--- a/frameworks/native/neural_network_core/neural_network_core.cpp
+++ b/frameworks/native/neural_network_core/neural_network_core.cpp
@@ -544,7 +544,6 @@ OH_NN_ReturnCode AuthenticateModel(const Compilation* compilation, bool &isExcee
     }
 
     if (!isExceedRamLimit) {
-        LOGI("Model accupy memory less then limit, no need authenticating.");
         return OH_NN_SUCCESS; // If model ram is less than max limit, no need authenticating.
     }
 
@@ -609,7 +608,6 @@ OH_NN_ReturnCode Authentication(Compilation** compilation, bool &isExceedRamLimi
         return ret;
     }
 
-    LOGI("Authentication success.");
     return OH_NN_SUCCESS;
 }
 
@@ -618,9 +616,6 @@ OH_NN_ReturnCode GetNnrtModelId(Compilation* compilationImpl, NNRtServiceApi& nn
 {
     std::string modelName;
     OH_NN_ReturnCode retCode = compilationImpl->compiler->GetModelName(modelName);
-    if (retCode != OH_NN_SUCCESS) {
-        LOGW("GetModelName is failed.");
-    }
 
     if (compilationImpl->nnModel != nullptr) {
         compilationImpl->nnrtModelID = nnrtService.GetNNRtModelIDFromCache(compilationImpl->cachePath,
@@ -757,7 +752,6 @@ NNRT_API OH_NN_ReturnCode OH_NNCompilation_Build(OH_NNCompilation *compilation)
     }
 
     std::unordered_map> configs;
-    LOGI("[OH_NNCompilation_Build] model isExceedRamLimit: %{public}d", static_cast(isExceedRamLimit));
 
     std::vector configContents;
     if (isExceedRamLimit) {
@@ -1663,7 +1657,6 @@ OH_NN_ReturnCode UpdateModelLatency(const ExecutorConfig* config, int32_t modelL
         return static_cast(ret);
     }
 
-    LOGI("UpdateModelLatency success.");
     return OH_NN_SUCCESS;
 }
 
diff --git a/frameworks/native/neural_network_runtime/nncompiled_cache.cpp b/frameworks/native/neural_network_runtime/nncompiled_cache.cpp
index 5053e52..effcab5 100644
--- a/frameworks/native/neural_network_runtime/nncompiled_cache.cpp
+++ b/frameworks/native/neural_network_runtime/nncompiled_cache.cpp
@@ -40,7 +40,6 @@ OH_NN_ReturnCode NNCompiledCache::Save(const std::vector(m_isExceedRamLimit));
     if (caches.empty()) {
         LOGE("[NNCompiledCache] Save failed, caches is empty.");
         return OH_NN_INVALID_PARAMETER;
@@ -57,7 +56,6 @@ OH_NN_ReturnCode NNCompiledCache::Save(const std::vector(m_isExceedRamLimit));
     cacheInfo["data"]["isExceedRamLimit"] = m_isExceedRamLimit ? 1 : 0;
     const size_t dataLength = cacheInfo["data"].dump().length();
diff --git a/frameworks/native/neural_network_runtime/nncompiler.cpp b/frameworks/native/neural_network_runtime/nncompiler.cpp
index ec40f00..c27b313 100644
--- a/frameworks/native/neural_network_runtime/nncompiler.cpp
+++ b/frameworks/native/neural_network_runtime/nncompiler.cpp
@@ -287,7 +287,6 @@ OH_NN_ReturnCode NNCompiler::CheckModelParameter() const
 {
     // If m_innerModel is not passed, the compiler must be construct from cache, jump check m_innerModel.
     if (m_innerModel == nullptr) {
-        LOGW("[NNCompiler] Restoring from cache not need to check model.");
         return OH_NN_SUCCESS;
     }
 
@@ -309,7 +308,6 @@ OH_NN_ReturnCode NNCompiler::IsOfflineModel(bool& isOfflineModel) const
 {
     // If m_innerModel is not passed, the compiler must be construct from cache, jump check m_innerModel.
     if (m_innerModel == nullptr) {
-        LOGW("[NNCompiler] Restoring from cache not need to judge offline model.");
        return OH_NN_SUCCESS;
     }
 
@@ -456,12 +454,7 @@ OH_NN_ReturnCode NNCompiler::OnlineBuild()
     // cache存在,从cache直接复原prepareModel、input/output TensorDesc
     OH_NN_ReturnCode ret = RestoreFromCacheFile();
     if (ret != OH_NN_SUCCESS) {
-        LOGW("[NNCompiler] cache file is failed, to delete cache file.");
         char path[PATH_MAX];
-        if (realpath(m_cachePath.c_str(), path) == nullptr) {
-            LOGW("[NNCompiledCache] WriteCacheInfo failed, fail to get the real path of cacheDir.");
-        }
-        std::string cachePath = path;
 
         std::string cacheInfo = cachePath + "/" + m_extensionConfig.modelName + "cache_info.nncache";
         if (std::filesystem::exists(cacheInfo)) {
@@ -474,7 +467,6 @@ OH_NN_ReturnCode NNCompiler::OnlineBuild()
         return ret;
     }
     if (ret == OH_NN_SUCCESS) {
-        LOGI("[NNCompiler] Build success, restore from cache file.");
         m_isBuild = true;
     }
 
@@ -705,7 +697,6 @@ OH_NN_ReturnCode NNCompiler::RestoreFromCacheFile()
     m_inputTensorDescs = inputTensorDescs;
     m_outputTensorDescs = outputTensorDescs;
 
-    LOGI("[NNCompiler] Restore model cache successfully.");
     return OH_NN_SUCCESS;
 }
 
@@ -730,7 +721,6 @@ OH_NN_ReturnCode NNCompiler::SetExtensionConfig(const std::unordered_map>& options)
 {
-    LOGE("[NNCompiler] SetOptions is not supported for NN compiler currently.");
     return OH_NN_UNSUPPORTED;
 }
 
-- 
Gitee

From 8eb088c48212777225acb977318fc26faef55be7 Mon Sep 17 00:00:00 2001
From: zhuyinlin <1085905529@qq.com>
Date: Wed, 25 Jun 2025 14:14:15 +0800
Subject: [PATCH 2/2] =?UTF-8?q?=E6=97=A5=E5=BF=97=E6=95=B4=E6=94=B9?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: zhuyinlin <1085905529@qq.com>
---
 frameworks/native/neural_network_core/neural_network_core.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/frameworks/native/neural_network_core/neural_network_core.cpp b/frameworks/native/neural_network_core/neural_network_core.cpp
index 0b52796..b55a08b 100644
--- a/frameworks/native/neural_network_core/neural_network_core.cpp
+++ b/frameworks/native/neural_network_core/neural_network_core.cpp
@@ -615,7 +615,7 @@ namespace {
 OH_NN_ReturnCode GetNnrtModelId(Compilation* compilationImpl, NNRtServiceApi& nnrtService)
 {
     std::string modelName;
-    OH_NN_ReturnCode retCode = compilationImpl->compiler->GetModelName(modelName);
+    compilationImpl->compiler->GetModelName(modelName);
 
     if (compilationImpl->nnModel != nullptr) {
         compilationImpl->nnrtModelID = nnrtService.GetNNRtModelIDFromCache(compilationImpl->cachePath,
-- 
Gitee