diff --git a/frameworks/native/neural_network_runtime/hdi_device_v1_0.cpp b/frameworks/native/neural_network_runtime/hdi_device_v1_0.cpp
index 87cd2eb9ef3be9efae984f80a895707ed8358971..5fb12c0a790f4c7ccc29662aa94cdeeeb1fdd2ce 100644
--- a/frameworks/native/neural_network_runtime/hdi_device_v1_0.cpp
+++ b/frameworks/native/neural_network_runtime/hdi_device_v1_0.cpp
@@ -354,6 +354,7 @@ void* HDIDeviceV1_0::AllocateBuffer(size_t length)
     auto addr = memManager->MapMemory(buffer.fd, length);
     if (addr == nullptr) {
         LOGE("Map fd to address failed.");
+        m_iDevice->ReleaseBuffer(buffer);
     }
     return addr;
 }
diff --git a/frameworks/native/neural_network_runtime/hdi_device_v2_0.cpp b/frameworks/native/neural_network_runtime/hdi_device_v2_0.cpp
index 09c92d0ef203c03faab799b7d8caca5603d83158..3d5bd6578b2f805a45e738b64d1ac5b031d63e85 100644
--- a/frameworks/native/neural_network_runtime/hdi_device_v2_0.cpp
+++ b/frameworks/native/neural_network_runtime/hdi_device_v2_0.cpp
@@ -389,6 +389,7 @@ void* HDIDeviceV2_0::AllocateBuffer(size_t length)
     auto addr = memManager->MapMemory(buffer.fd, length);
     if (addr == nullptr) {
         LOGE("Map fd to address failed.");
+        m_iDevice->ReleaseBuffer(buffer);
     }
     return addr;
 }
diff --git a/frameworks/native/neural_network_runtime/hdi_prepared_model_v1_0.cpp b/frameworks/native/neural_network_runtime/hdi_prepared_model_v1_0.cpp
index e3211ceeb7a980554fbcc7fd49b7b45c2fe1abd3..9c94d81777ba0b912588f81226db9b3c3d138aab 100644
--- a/frameworks/native/neural_network_runtime/hdi_prepared_model_v1_0.cpp
+++ b/frameworks/native/neural_network_runtime/hdi_prepared_model_v1_0.cpp
@@ -165,6 +165,17 @@ HDIPreparedModelV1_0::HDIPreparedModelV1_0(OHOS::sptr<V1_0::IPreparedModel> hdiP
     hdiPreparedModel->GetVersion(m_hdiVersion.first, m_hdiVersion.second);
 }
 
+HDIPreparedModelV1_0::~HDIPreparedModelV1_0()
+{
+    for (auto addr : m_addrs) {
+        auto memManager = MemoryManager::GetInstance();
+        OH_NN_ReturnCode ret = memManager->UnMapMemory(addr);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("~HDIPreparedModelV1_0 UnMapMemory failed.");
+        }
+    }
+}
+
 OH_NN_ReturnCode HDIPreparedModelV1_0::ExportModelCache(std::vector<Buffer>& modelCache)
 {
     if (!modelCache.empty()) {
@@ -187,6 +198,7 @@ OH_NN_ReturnCode HDIPreparedModelV1_0::ExportModelCache(std::vector<Buffer>& mod
             LOGE("Export the %zuth model cache failed, cannot not map fd to address.", i + 1);
             return OH_NN_MEMORY_ERROR;
         }
+        m_addrs.emplace_back(addr);
         Buffer modelbuffer {addr, iBuffers[i].bufferSize};
         modelCache.emplace_back(modelbuffer);
     }
diff --git a/frameworks/native/neural_network_runtime/hdi_prepared_model_v1_0.h b/frameworks/native/neural_network_runtime/hdi_prepared_model_v1_0.h
index c1beeb75b238192a71b0d5c97e0860a2b799de0f..ff227747ceb35e6499b5bd46b978d59c88c35a4d 100644
--- a/frameworks/native/neural_network_runtime/hdi_prepared_model_v1_0.h
+++ b/frameworks/native/neural_network_runtime/hdi_prepared_model_v1_0.h
@@ -33,6 +33,7 @@ namespace NeuralNetworkRuntime {
 class HDIPreparedModelV1_0 : public PreparedModel {
 public:
     explicit HDIPreparedModelV1_0(OHOS::sptr<V1_0::IPreparedModel> hdiPreparedModel);
+    ~HDIPreparedModelV1_0() override;
 
     OH_NN_ReturnCode ExportModelCache(std::vector<Buffer>& modelCache) override;
 
@@ -50,6 +51,7 @@ private:
     // first: major version, second: minor version
     std::pair<uint32_t, uint32_t> m_hdiVersion;
     OHOS::sptr<V1_0::IPreparedModel> m_hdiPreparedModel {nullptr};
+    std::vector<void*> m_addrs;
 };
 } // namespace NeuralNetworkRuntime
 } // OHOS
diff --git a/frameworks/native/neural_network_runtime/memory_manager.cpp b/frameworks/native/neural_network_runtime/memory_manager.cpp
index 969efa43085d5e43fe5207b1338b0039924b3034..217b33b17aeac9e576650a283cc9a41235234c02 100644
--- a/frameworks/native/neural_network_runtime/memory_manager.cpp
+++ b/frameworks/native/neural_network_runtime/memory_manager.cpp
@@ -55,6 +55,7 @@ OH_NN_ReturnCode MemoryManager::UnMapMemory(const void* buffer)
         return OH_NN_INVALID_PARAMETER;
     }
 
+    std::lock_guard<std::mutex> lock(m_mtx);
     auto iter = m_memorys.find(buffer);
     if (iter == m_memorys.end()) {
         LOGE("This buffer is not found, cannot release.");
@@ -69,23 +70,18 @@ OH_NN_ReturnCode MemoryManager::UnMapMemory(const void* buffer)
     }
     memory.data = nullptr;
 
-    if (close(memory.fd) != 0) {
-        LOGE("Close memory fd failed. fd=%d", memory.fd);
-        return OH_NN_MEMORY_ERROR;
-    }
-
-    std::lock_guard<std::mutex> lock(m_mtx);
     m_memorys.erase(iter);
     return OH_NN_SUCCESS;
 }
 
-OH_NN_ReturnCode MemoryManager::GetMemory(const void* buffer, Memory& memory) const
+OH_NN_ReturnCode MemoryManager::GetMemory(const void* buffer, Memory& memory)
 {
     if (buffer == nullptr) {
         LOGE("Memory is nullptr.");
         return OH_NN_NULL_PTR;
     }
 
+    std::lock_guard<std::mutex> lock(m_mtx);
     auto iter = m_memorys.find(buffer);
     if (iter == m_memorys.end()) {
         LOGE("Memory is not found.");
diff --git a/frameworks/native/neural_network_runtime/memory_manager.h b/frameworks/native/neural_network_runtime/memory_manager.h
index ae56bec343ce358e8dcd78b64b0197c613ce7f94..d6c96077de0618e98ee9d24ebe26c0564b7e86c1 100644
--- a/frameworks/native/neural_network_runtime/memory_manager.h
+++ b/frameworks/native/neural_network_runtime/memory_manager.h
@@ -37,7 +37,7 @@ public:
 
     void* MapMemory(int fd, size_t length);
     OH_NN_ReturnCode UnMapMemory(const void* buffer);
-    OH_NN_ReturnCode GetMemory(const void* buffer, Memory& memory) const;
+    OH_NN_ReturnCode GetMemory(const void* buffer, Memory& memory);
 
     static MemoryManager* GetInstance()
     {
diff --git a/frameworks/native/neural_network_runtime/nntensor.cpp b/frameworks/native/neural_network_runtime/nntensor.cpp
index d234227d863dfa96b457b482c24e37577131e18a..0bfdc71f015143916eec2c69af6f3b517eb7f302 100644
--- a/frameworks/native/neural_network_runtime/nntensor.cpp
+++ b/frameworks/native/neural_network_runtime/nntensor.cpp
@@ -252,10 +252,6 @@ OH_NN_ReturnCode NNTensor2_0::ReleaseMemory()
     }
 
     if (!m_isUserData) {
-        if (close(m_fd) != 0) {
-            LOGE("NNTensor2_0::ReleaseMemory failed. fd=%{public}d", m_fd);
-            return OH_NN_MEMORY_ERROR;
-        }
         BackendManager& backendManager = BackendManager::GetInstance();
         std::shared_ptr<Backend> backend = backendManager.GetBackend(m_backendID);
         if (backend == nullptr) {