From 32c0d16e8e93770fd03c3899b4819d0c639b53a7 Mon Sep 17 00:00:00 2001
From: w30052974
Date: Fri, 2 Feb 2024 19:00:53 +0800
Subject: [PATCH 1/2] =?UTF-8?q?profiling/opLayouts=E5=90=88=E5=85=A5?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: w30052974
---
 frameworks/native/neural_network_core/cpp_type.h |  4 ++++
 .../neural_network_runtime/inner_model.cpp       | 16 ++++++++++++++--
 .../native/neural_network_runtime/inner_model.h  |  7 ++++++-
 .../neural_network_runtime.cpp                   | 12 +++++++++++-
 .../native/neural_network_runtime/nncompiler.cpp |  4 +++-
 .../native/neural_network_runtime/nncompiler.h   |  2 ++
 6 files changed, 40 insertions(+), 5 deletions(-)

diff --git a/frameworks/native/neural_network_core/cpp_type.h b/frameworks/native/neural_network_core/cpp_type.h
index 96d6594..faf9039 100644
--- a/frameworks/native/neural_network_core/cpp_type.h
+++ b/frameworks/native/neural_network_core/cpp_type.h
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include <map>

 #include "interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h"

@@ -37,6 +38,9 @@ struct ModelConfig {
     bool enableFloat16;
     OH_NN_PerformanceMode mode;
     OH_NN_Priority priority;
+    std::string isProfiling;
+    std::string cachePath;
+    std::map<std::string, std::string> opLayout;
 };

 struct Buffer {
diff --git a/frameworks/native/neural_network_runtime/inner_model.cpp b/frameworks/native/neural_network_runtime/inner_model.cpp
index 2b275ba..4f68ed3 100644
--- a/frameworks/native/neural_network_runtime/inner_model.cpp
+++ b/frameworks/native/neural_network_runtime/inner_model.cpp
@@ -148,7 +148,8 @@ OH_NN_ReturnCode InnerModel::BuildFromLiteGraph(const MSLITE::LiteGraph* liteGra
 }

 OH_NN_ReturnCode InnerModel::BuildFromMetaGraph(
-    const void* metaGraph, const Buffer& quantBuffer, const std::string& modelName)
+    const void* metaGraph, const Buffer& quantBuffer, const std::string& modelName, const std::string& isProfiling,
+    std::map<std::string, std::string>& opLayouts)
 {
     NNRT_TRACE_NAME("Build model from meta graph");
     if (metaGraph == nullptr) {
@@ -169,7 +170,8 @@ OH_NN_ReturnCode InnerModel::BuildFromMetaGraph(
     m_metaGraph = const_cast<void*>(metaGraph);
     m_quantBuffer = quantBuffer;
     m_modelName = modelName;
-
+    m_isProfiling = isProfiling;
+    m_opLayouts = opLayouts;
     return OH_NN_SUCCESS;
 }

@@ -762,5 +764,15 @@ std::string InnerModel::GetModelName() const
 {
     return m_modelName;
 }
+
+std::string InnerModel::GetProfiling() const
+{
+    return m_isProfiling;
+}
+
+std::map<std::string, std::string> InnerModel::GetOpLayouts() const
+{
+    return m_opLayouts;
+}
 } // namespace NeuralNetworkRuntime
 } // namespace OHOS
\ No newline at end of file
diff --git a/frameworks/native/neural_network_runtime/inner_model.h b/frameworks/native/neural_network_runtime/inner_model.h
index a538ed0..f95862d 100644
--- a/frameworks/native/neural_network_runtime/inner_model.h
+++ b/frameworks/native/neural_network_runtime/inner_model.h
@@ -34,7 +34,8 @@ public:
     bool IsBuild() const;
     OH_NN_ReturnCode BuildFromLiteGraph(const mindspore::lite::LiteGraph* liteGraph);
     OH_NN_ReturnCode BuildFromMetaGraph(const void* metaGraph, const Buffer& quantBuffer,
-        const std::string& modelName);
+        const std::string& modelName, const std::string& isProfiling,
+        std::map<std::string, std::string>& opLayouts);
     OH_NN_ReturnCode AddTensor(const OH_NN_Tensor& nnTensor);
     OH_NN_ReturnCode AddTensorDesc(const NN_TensorDesc* nnTensorDesc);
     OH_NN_ReturnCode SetTensorQuantParam(uint32_t index, const NN_QuantParam* quantParam);
@@ -58,6 +59,8 @@ public:
     void* GetMetaGraph() const;
     Buffer GetQuantBuffer() const;
     std::string GetModelName() const;
+    std::string GetProfiling() const;
+    std::map<std::string, std::string> GetOpLayouts() const;

 private:
     void AddTensorsToLiteGraph(std::unordered_map<uint32_t, uint32_t>& modelIDToGraphID);
@@ -79,6 +82,8 @@ private:
     void* m_metaGraph {nullptr};
     Buffer m_quantBuffer = {nullptr, 0};
     std::string m_modelName;
+    std::string m_isProfiling;
+    std::map<std::string, std::string> m_opLayouts;
 };
 } // namespace NeuralNetworkRuntime
 } // namespace OHOS
diff --git a/frameworks/native/neural_network_runtime/neural_network_runtime.cpp b/frameworks/native/neural_network_runtime/neural_network_runtime.cpp
index 6207fe4..0931923 100644
--- a/frameworks/native/neural_network_runtime/neural_network_runtime.cpp
+++ b/frameworks/native/neural_network_runtime/neural_network_runtime.cpp
@@ -330,6 +330,9 @@ NNRT_API OH_NN_ReturnCode OH_NNModel_BuildFromMetaGraph(OH_NNModel *model, const

     Buffer buffer;
     std::string modelName;
+    std::string isProfiling;
+    std::string opLayout;
+    std::map<std::string, std::string> opLayouts;
     for (size_t i = 0; i < extensionSize; ++i) {
         std::string name = extensions[i].name;
         if (name == "QuantBuffer") {
@@ -337,11 +340,18 @@ NNRT_API OH_NN_ReturnCode OH_NNModel_BuildFromMetaGraph(OH_NNModel *model, const
             buffer.length = extensions[i].valueSize;
         } else if (name == "ModelName") {
             modelName.assign(extensions[i].value, extensions[i].value + extensions[i].valueSize);
+        } else if (name == "Profiling") {
+            isProfiling.assign(extensions[i].value, extensions[i].value + extensions[i].valueSize);
+            LOGI("OH_NNModel_BuildFromMetaGraph isProfiling enable.");
+        } else if (name == "opLayout") {
+            opLayout.assign(extensions[i].value, extensions[i].value + extensions[i].valueSize);
+            opLayout.insert({opLayout, "hiai::ExecuteDevice::CPU"});
+            LOGI("OH_NNModel_BuildFromMetaGraph opLayout:%{public}s.", opLayout.c_str());
         }
     }

     InnerModel *innerModel = reinterpret_cast<InnerModel*>(model);
-    return innerModel->BuildFromMetaGraph(metaGraph, buffer, modelName);
+    return innerModel->BuildFromMetaGraph(metaGraph, buffer, modelName, isProfiling, opLayouts);
 }

 NNRT_API OH_NN_ReturnCode OH_NNModel_SetInputsAndOutputsInfo(OH_NNModel *model, const OH_NN_TensorInfo *inputsInfo,
diff --git a/frameworks/native/neural_network_runtime/nncompiler.cpp b/frameworks/native/neural_network_runtime/nncompiler.cpp
index cb70f2c..bc7a409 100644
--- a/frameworks/native/neural_network_runtime/nncompiler.cpp
+++ b/frameworks/native/neural_network_runtime/nncompiler.cpp
@@ -128,6 +128,8 @@ NNCompiler::NNCompiler(const void* model, std::shared_ptr device, size_t
     m_metaGraph = m_innerModel->GetMetaGraph();
     m_quantBuffer = m_innerModel->GetQuantBuffer();
     m_modelName = m_innerModel->GetModelName();
+    m_isProfiling = m_innerModel->GetProfiling();
+    m_opLayouts = m_innerModel->GetOpLayouts();
 }

 NNCompiler::~NNCompiler()
@@ -373,7 +375,7 @@ OH_NN_ReturnCode NNCompiler::NormalBuild()
     }

     ModelConfig config {m_enableFp16, static_cast<OH_NN_PerformanceMode>(m_performance),
-        static_cast<OH_NN_Priority>(m_priority)};
+        static_cast<OH_NN_Priority>(m_priority), m_isProfiling, m_cachePath, m_opLayouts};
     if (m_liteGraph != nullptr) {
         ret = m_device->PrepareModel(m_liteGraph, config, m_preparedModel);
     }
diff --git a/frameworks/native/neural_network_runtime/nncompiler.h b/frameworks/native/neural_network_runtime/nncompiler.h
index f95c8da..87bbb07 100644
--- a/frameworks/native/neural_network_runtime/nncompiler.h
+++ b/frameworks/native/neural_network_runtime/nncompiler.h
@@ -82,6 +82,8 @@ private:
     std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
     Buffer m_quantBuffer {nullptr, 0};
     std::string m_modelName;
+    std::string m_isProfiling;
+    std::map m_opLayouts;
     void* m_metaGraph {nullptr};
     InnerModel* m_innerModel {nullptr};
     std::shared_ptr<mindspore::lite::LiteGraph> m_liteGraph {nullptr};
--
Gitee

From f88b63d6786cc1f36145c3c8b8d9dba74880f853 Mon Sep 17 00:00:00 2001
From: w30052974
Date: Sun, 4 Feb 2024 10:20:35 +0800
Subject: [PATCH 2/2] =?UTF-8?q?profiling/opLayouts=E5=90=88=E5=85=A5?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: w30052974
---
 .../native/neural_network_runtime/neural_network_runtime.cpp | 2 +-
 frameworks/native/neural_network_runtime/nncompiler.h        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/frameworks/native/neural_network_runtime/neural_network_runtime.cpp b/frameworks/native/neural_network_runtime/neural_network_runtime.cpp
index 0931923..c8f502e 100644
--- a/frameworks/native/neural_network_runtime/neural_network_runtime.cpp
+++ b/frameworks/native/neural_network_runtime/neural_network_runtime.cpp
@@ -345,7 +345,7 @@ NNRT_API OH_NN_ReturnCode OH_NNModel_BuildFromMetaGraph(OH_NNModel *model, const
             LOGI("OH_NNModel_BuildFromMetaGraph isProfiling enable.");
         } else if (name == "opLayout") {
             opLayout.assign(extensions[i].value, extensions[i].value + extensions[i].valueSize);
-            opLayout.insert({opLayout, "hiai::ExecuteDevice::CPU"});
+            opLayouts.insert({opLayout, "hiai::ExecuteDevice::CPU"});
             LOGI("OH_NNModel_BuildFromMetaGraph opLayout:%{public}s.", opLayout.c_str());
         }
     }
diff --git a/frameworks/native/neural_network_runtime/nncompiler.h b/frameworks/native/neural_network_runtime/nncompiler.h
index 87bbb07..b5655a7 100644
--- a/frameworks/native/neural_network_runtime/nncompiler.h
+++ b/frameworks/native/neural_network_runtime/nncompiler.h
@@ -83,7 +83,7 @@ private:
     Buffer m_quantBuffer {nullptr, 0};
     std::string m_modelName;
     std::string m_isProfiling;
-    std::map m_opLayouts;
+    std::map<std::string, std::string> m_opLayouts;
     void* m_metaGraph {nullptr};
     InnerModel* m_innerModel {nullptr};
     std::shared_ptr<mindspore::lite::LiteGraph> m_liteGraph {nullptr};
--
Gitee