diff --git a/frameworks/native/neural_network_core/backend_manager.cpp b/frameworks/native/neural_network_core/backend_manager.cpp
index 699df8d504a6a112ba833d590b030669cf2c759a..b1c6f11760aa2bf15aa32b9406401493ca13d8cb 100644
--- a/frameworks/native/neural_network_core/backend_manager.cpp
+++ b/frameworks/native/neural_network_core/backend_manager.cpp
@@ -34,7 +34,6 @@ BackendManager& BackendManager::GetInstance()
     if (dlopen("libneural_network_runtime.so", RTLD_NOLOAD) != nullptr) {
         // if libneural_network_runtime_ext.so not loaded, try to dlopen it
         if (dlopen("libneural_network_runtime_ext.so", RTLD_NOLOAD) == nullptr) {
-            LOGI("dlopen libneural_network_runtime_ext.so.");
             void* libHandle = dlopen("libneural_network_runtime_ext.so", RTLD_NOW | RTLD_GLOBAL);
             if (libHandle == nullptr) {
                 LOGW("Failed to dlopen libneural_network_runtime_ext.so.");
@@ -142,7 +141,6 @@ OH_NN_ReturnCode BackendManager::RegisterBackend(

 void BackendManager::RemoveBackend(const std::string& backendName)
 {
-    LOGI("[RemoveBackend] start remove backend for %{public}s.", backendName.c_str());
     const std::lock_guard<std::mutex> lock(m_mtx);
     if (m_backendIDGroup.find(backendName) == m_backendIDGroup.end()) {
         LOGI("[RemoveBackend] No need to remove backend for %{public}s.", backendName.c_str());
@@ -161,7 +159,6 @@ void BackendManager::RemoveBackend(const std::string& backendName)
         if (m_backendNames.find(backendID) != m_backendNames.end()) {
             m_backendNames.erase(backendID);
         }
-        LOGI("[RemoveBackend] remove backendID[%{public}zu] for %{public}s success.", backendID, backendName.c_str());
     }
     m_backendIDGroup.erase(backendName);
 }
diff --git a/frameworks/native/neural_network_core/neural_network_core.cpp b/frameworks/native/neural_network_core/neural_network_core.cpp
index e5cc6b97bda16a5e19e7dd7bf729d6ae730ee3cc..b55a08be15c8272871f1709a6b546946b7e82ca1 100644
--- a/frameworks/native/neural_network_core/neural_network_core.cpp
+++ b/frameworks/native/neural_network_core/neural_network_core.cpp
@@ -544,7 +544,6 @@ OH_NN_ReturnCode AuthenticateModel(const Compilation* compilation, bool &isExcee
     }

     if (!isExceedRamLimit) {
-        LOGI("Model accupy memory less then limit, no need authenticating.");
         return OH_NN_SUCCESS; // If model ram is less than max limit, no need authenticating.
     }

@@ -609,7 +608,6 @@ OH_NN_ReturnCode Authentication(Compilation** compilation, bool &isExceedRamLimi
         return ret;
     }

-    LOGI("Authentication success.");
     return OH_NN_SUCCESS;
 }

@@ -617,10 +615,7 @@ namespace {
 OH_NN_ReturnCode GetNnrtModelId(Compilation* compilationImpl, NNRtServiceApi& nnrtService)
 {
     std::string modelName;
-    OH_NN_ReturnCode retCode = compilationImpl->compiler->GetModelName(modelName);
-    if (retCode != OH_NN_SUCCESS) {
-        LOGW("GetModelName is failed.");
-    }
+    compilationImpl->compiler->GetModelName(modelName);

     if (compilationImpl->nnModel != nullptr) {
         compilationImpl->nnrtModelID = nnrtService.GetNNRtModelIDFromCache(compilationImpl->cachePath,
@@ -757,7 +752,6 @@ NNRT_API OH_NN_ReturnCode OH_NNCompilation_Build(OH_NNCompilation *compilation)
     }

     std::unordered_map<std::string, std::vector<char>> configs;
-    LOGI("[OH_NNCompilation_Build] model isExceedRamLimit: %{public}d", static_cast<int>(isExceedRamLimit));

     std::vector<char> configContents;
     if (isExceedRamLimit) {
@@ -1663,7 +1657,6 @@ OH_NN_ReturnCode UpdateModelLatency(const ExecutorConfig* config, int32_t modelL
         return static_cast<OH_NN_ReturnCode>(ret);
     }

-    LOGI("UpdateModelLatency success.");
     return OH_NN_SUCCESS;
 }
diff --git a/frameworks/native/neural_network_runtime/nncompiled_cache.cpp b/frameworks/native/neural_network_runtime/nncompiled_cache.cpp
index 5053e52fcf2474d0ee8b34b1c332a9043979ba24..effcab5ad5360e739bc82cafab988521f3b5c48f 100644
--- a/frameworks/native/neural_network_runtime/nncompiled_cache.cpp
+++ b/frameworks/native/neural_network_runtime/nncompiled_cache.cpp
@@ -40,7 +40,6 @@ OH_NN_ReturnCode NNCompiledCache::Save(const std::vector<Buffer>& caches,
                                        uint32_t version)
 {
-    LOGI("[NNCompiledCache] isExceedRamLimit: %{public}d", static_cast<int>(m_isExceedRamLimit));
     if (caches.empty()) {
         LOGE("[NNCompiledCache] Save failed, caches is empty.");
         return OH_NN_INVALID_PARAMETER;
     }
@@ -57,7 +56,6 @@ OH_NN_ReturnCode NNCompiledCache::Save(const std::vector<Buffer>& caches,
-    LOGI("[NNCompiledCache] isExceedRamLimit: %{public}d", static_cast<int>(m_isExceedRamLimit));
     cacheInfo["data"]["isExceedRamLimit"] = m_isExceedRamLimit ? 1 : 0;

     const size_t dataLength = cacheInfo["data"].dump().length();
diff --git a/frameworks/native/neural_network_runtime/nncompiler.cpp b/frameworks/native/neural_network_runtime/nncompiler.cpp
index ec40f00e2d037745cd27b02905dce5f6e5a50aa5..c27b3131ed83ea7f5c481a183a0b8350d57f542b 100644
--- a/frameworks/native/neural_network_runtime/nncompiler.cpp
+++ b/frameworks/native/neural_network_runtime/nncompiler.cpp
@@ -287,7 +287,6 @@ OH_NN_ReturnCode NNCompiler::CheckModelParameter() const
 {
     // If m_innerModel is not passed, the compiler must be construct from cache, jump check m_innerModel.
     if (m_innerModel == nullptr) {
-        LOGW("[NNCompiler] Restoring from cache not need to check model.");
         return OH_NN_SUCCESS;
     }

@@ -309,7 +308,6 @@ OH_NN_ReturnCode NNCompiler::IsOfflineModel(bool& isOfflineModel) const
 {
     // If m_innerModel is not passed, the compiler must be construct from cache, jump check m_innerModel.
     if (m_innerModel == nullptr) {
-        LOGW("[NNCompiler] Restoring from cache not need to judge offline model.");
         return OH_NN_SUCCESS;
     }

@@ -456,12 +454,7 @@ OH_NN_ReturnCode NNCompiler::OnlineBuild()
 {
     // If the cache exists, restore prepareModel and the input/output TensorDescs directly from it.
     OH_NN_ReturnCode ret = RestoreFromCacheFile();
     if (ret != OH_NN_SUCCESS) {
-        LOGW("[NNCompiler] cache file is failed, to delete cache file.");
-        char path[PATH_MAX];
-        if (realpath(m_cachePath.c_str(), path) == nullptr) {
-            LOGW("[NNCompiledCache] WriteCacheInfo failed, fail to get the real path of cacheDir.");
-        }
-        std::string cachePath = path;
-        std::string cacheInfo = cachePath + "/" + m_extensionConfig.modelName + "cache_info.nncache";
+        // Locate the cache info file under the configured cache directory.
+        std::string cacheInfo = m_cachePath + "/" + m_extensionConfig.modelName + "cache_info.nncache";
         if (std::filesystem::exists(cacheInfo)) {
@@ -474,7 +467,6 @@ OH_NN_ReturnCode NNCompiler::OnlineBuild()
         return ret;
     }
     if (ret == OH_NN_SUCCESS) {
-        LOGI("[NNCompiler] Build success, restore from cache file.");
         m_isBuild = true;
     }

@@ -705,7 +697,6 @@ OH_NN_ReturnCode NNCompiler::RestoreFromCacheFile()
     m_inputTensorDescs = inputTensorDescs;
     m_outputTensorDescs = outputTensorDescs;

-    LOGI("[NNCompiler] Restore model cache successfully.");
     return OH_NN_SUCCESS;
 }

@@ -730,7 +721,6 @@ OH_NN_ReturnCode NNCompiler::SetExtensionConfig(const std::unordered_map<std::s

 OH_NN_ReturnCode NNCompiler::SetOptions(const std::vector<std::shared_ptr<void>>& options)
 {
-    LOGE("[NNCompiler] SetOptions is not supported for NN compiler currently.");
     return OH_NN_UNSUPPORTED;
 }