From 72fb95d8d8ed0c354efdac980ab4ee2bc1641b2c Mon Sep 17 00:00:00 2001 From: gWX1231951 Date: Thu, 24 Aug 2023 17:03:53 +0800 Subject: [PATCH] =?UTF-8?q?nnrt=E5=91=8A=E8=AD=A6=E4=BF=AE=E6=94=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: gWX1231951 --- .../src/nnrt_device_service.cpp | 22 ++++++++++--------- .../src/nnrt_device_service.cpp | 22 ++++++++++--------- frameworks/native/device_discover_v2_0.cpp | 13 ++++++----- frameworks/native/hdi_device_v2_0.cpp | 2 +- frameworks/native/hdi_device_v2_0.h | 2 +- frameworks/native/hdi_prepared_model_v1_0.cpp | 4 ++-- frameworks/native/hdi_prepared_model_v2_0.cpp | 4 ++-- frameworks/native/inner_model.h | 2 +- test/fuzztest/data.h | 4 ++-- .../hdinnrtdevice_fuzzer.cpp | 2 +- .../hdinnrtpreparedmodel_fuzzer.cpp | 3 +-- 11 files changed, 42 insertions(+), 38 deletions(-) diff --git a/example/drivers/nnrt/v1_0/hdi_cpu_service/src/nnrt_device_service.cpp b/example/drivers/nnrt/v1_0/hdi_cpu_service/src/nnrt_device_service.cpp index f609edf..3ce36d6 100644 --- a/example/drivers/nnrt/v1_0/hdi_cpu_service/src/nnrt_device_service.cpp +++ b/example/drivers/nnrt/v1_0/hdi_cpu_service/src/nnrt_device_service.cpp @@ -265,18 +265,20 @@ int32_t NnrtDeviceService::ValidateModel(const Model& model) const } size_t tensorSize = model.allTensors.size(); - for (auto index : model.inputIndex) { - if (index > tensorSize) { - HDF_LOGE("Input index is invalid, index=%u", index); - return HDF_ERR_INVALID_PARAM; - } + auto inputIt = std::find_if(model.inputIndex.begin(), model.inputIndex.end(), [tensorSize](size_t inputIndex) { + return inputIndex > tensorSize; + }); + if (inputIt != model.inputIndex.end()) { + HDF_LOGE("Input index is invalid, index=%u", *inputIt); + return HDF_ERR_INVALID_PARAM; } - for (auto index : model.outputIndex) { - if (index > tensorSize) { - HDF_LOGE("Output index is invalid, index=%u", index); - return HDF_ERR_INVALID_PARAM; - } + auto outputIt = 
std::find_if(model.outputIndex.begin(), model.outputIndex.end(), [tensorSize](size_t outputIndex) { + return outputIndex > tensorSize; + }); + if (outputIt != model.outputIndex.end()) { + HDF_LOGE("Output index is invalid, index=%u", *outputIt); + return HDF_ERR_INVALID_PARAM; } return HDF_SUCCESS; diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_service.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_service.cpp index 5da4e95..a2482a0 100644 --- a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_service.cpp +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_service.cpp @@ -300,18 +300,20 @@ NNRT_ReturnCode NnrtDeviceService::ValidateModel(const Model& model) const } size_t tensorSize = model.allTensors.size(); - for (auto index : model.inputIndex) { - if (index > tensorSize) { - HDF_LOGE("Input index is invalid, index=%u", index); - return NNRT_ReturnCode::NNRT_INVALID_INPUT; - } + auto inputIt = std::find_if(model.inputIndex.begin(), model.inputIndex.end(), [tensorSize](size_t inputIndex) { + return inputIndex > tensorSize; + }); + if (inputIt != model.inputIndex.end()) { + HDF_LOGE("Input index is invalid, index=%u", *inputIt); + return NNRT_ReturnCode::NNRT_INVALID_INPUT; } - for (auto index : model.outputIndex) { - if (index > tensorSize) { - HDF_LOGE("Output index is invalid, index=%u", index); - return NNRT_ReturnCode::NNRT_INVALID_OUTPUT; - } + auto outputIt = std::find_if(model.outputIndex.begin(), model.outputIndex.end(), [tensorSize](size_t outputIndex) { + return outputIndex > tensorSize; + }); + if (outputIt != model.outputIndex.end()) { + HDF_LOGE("Output index is invalid, index=%u", *outputIt); + return NNRT_ReturnCode::NNRT_INVALID_OUTPUT; } return NNRT_ReturnCode::NNRT_SUCCESS; diff --git a/frameworks/native/device_discover_v2_0.cpp b/frameworks/native/device_discover_v2_0.cpp index 7b0ad86..d71ff33 100644 --- a/frameworks/native/device_discover_v2_0.cpp +++ 
b/frameworks/native/device_discover_v2_0.cpp @@ -31,8 +31,9 @@ std::shared_ptr DiscoverHDIDevicesV2_0(std::string& deviceName, std::str } auto ret = iDevice->GetDeviceName(deviceName); - if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { - if (ret < V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + int32_t nnrtSuccess = static_cast<int32_t>(V2_0::NNRT_ReturnCode::NNRT_SUCCESS); + if (ret != nnrtSuccess) { + if (ret < nnrtSuccess) { LOGW("Get device name failed. An error occurred in HDI, errorcode is %{public}d.", ret); } else { OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast<OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode>(ret); @@ -42,8 +43,8 @@ std::shared_ptr DiscoverHDIDevicesV2_0(std::string& deviceName, std::str } ret = iDevice->GetVendorName(vendorName); - if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { - if (ret < V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + if (ret != nnrtSuccess) { + if (ret < nnrtSuccess) { LOGW("Get vendor name failed. An error occurred in HDI, errorcode is %{public}d.", ret); } else { OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast<OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode>(ret); @@ -54,8 +55,8 @@ std::shared_ptr DiscoverHDIDevicesV2_0(std::string& deviceName, std::str std::pair hdiVersion; ret = iDevice->GetVersion(hdiVersion.first, hdiVersion.second); - if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { - if (ret < V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + if (ret != nnrtSuccess) { + if (ret < nnrtSuccess) { LOGW("Get version failed. 
An error occurred in HDI, errorcode is %{public}d.", ret); } else { OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast(ret); diff --git a/frameworks/native/hdi_device_v2_0.cpp b/frameworks/native/hdi_device_v2_0.cpp index d10b508..e55d6a6 100644 --- a/frameworks/native/hdi_device_v2_0.cpp +++ b/frameworks/native/hdi_device_v2_0.cpp @@ -534,7 +534,7 @@ OH_NN_ReturnCode HDIDeviceV2_0::CopyOfflineModelToDevice(const std::vector& deviceBuffers, const ModelConfig& config, - const std::map> extensions, + const std::map>& extensions, std::shared_ptr& preparedModel) { V2_0::ModelConfig iModelConfig; diff --git a/frameworks/native/hdi_device_v2_0.h b/frameworks/native/hdi_device_v2_0.h index 61095f8..d3c3461 100644 --- a/frameworks/native/hdi_device_v2_0.h +++ b/frameworks/native/hdi_device_v2_0.h @@ -72,7 +72,7 @@ private: std::vector& deviceBuffers); OH_NN_ReturnCode PrepareOfflineModel(std::vector& deviceBuffers, const ModelConfig& config, - const std::map> extensions, + const std::map>& extensions, std::shared_ptr& preparedModel); private: diff --git a/frameworks/native/hdi_prepared_model_v1_0.cpp b/frameworks/native/hdi_prepared_model_v1_0.cpp index 35fccc6..d697ea3 100644 --- a/frameworks/native/hdi_prepared_model_v1_0.cpp +++ b/frameworks/native/hdi_prepared_model_v1_0.cpp @@ -133,7 +133,7 @@ OH_NN_ReturnCode HDIPreparedModelV1_0::Run(const std::vector& inputs, { V1_0::IOTensor iTensor; std::vector iInputTensors; - for (auto& input: inputs) { + for (const auto& input: inputs) { iTensor = TransIOTensor(input); if (iTensor.data.fd == INVALID_FD) { LOGE("Transform inputs tensor failed, cannot find data file descriptor."); @@ -143,7 +143,7 @@ OH_NN_ReturnCode HDIPreparedModelV1_0::Run(const std::vector& inputs, } std::vector iOutputTensors; - for (auto& output: outputs) { + for (const auto& output: outputs) { iTensor = TransIOTensor(output); if (iTensor.data.fd == INVALID_FD) { LOGE("Transform outputs tensor failed, cannot find data file descriptor."); diff 
--git a/frameworks/native/hdi_prepared_model_v2_0.cpp b/frameworks/native/hdi_prepared_model_v2_0.cpp index 40b15b2..9b0f0a4 100644 --- a/frameworks/native/hdi_prepared_model_v2_0.cpp +++ b/frameworks/native/hdi_prepared_model_v2_0.cpp @@ -133,7 +133,7 @@ OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector& inputs, { V2_0::IOTensor iTensor; std::vector iInputTensors; - for (auto& input: inputs) { + for (const auto& input: inputs) { iTensor = TransIOTensor(input); if (iTensor.data.fd == INVALID_FD) { LOGE("Transform inputs tensor failed, cannot find data file descriptor."); @@ -143,7 +143,7 @@ OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector& inputs, } std::vector iOutputTensors; - for (auto& output: outputs) { + for (const auto& output: outputs) { iTensor = TransIOTensor(output); if (iTensor.data.fd == INVALID_FD) { LOGE("Transform outputs tensor failed, cannot find data file descriptor."); diff --git a/frameworks/native/inner_model.h b/frameworks/native/inner_model.h index 8c7f3d5..7deec70 100644 --- a/frameworks/native/inner_model.h +++ b/frameworks/native/inner_model.h @@ -71,7 +71,7 @@ private: std::vector> m_outputTensors; // Used to pass output tensors to compilation. 
std::shared_ptr m_liteGraph {nullptr}; void* m_metaGraph {nullptr}; - Buffer m_quantBuffer; + Buffer m_quantBuffer = {nullptr, 0}; std::string m_modelName; }; } // namespace NeuralNetworkRuntime diff --git a/test/fuzztest/data.h b/test/fuzztest/data.h index f5b92d5..8ac8442 100644 --- a/test/fuzztest/data.h +++ b/test/fuzztest/data.h @@ -46,12 +46,12 @@ public: return object; } - const uint8_t* GetNowData() + const uint8_t* GetNowData() const { return dataFuzz + dataPos; } - size_t GetNowDataSize() + size_t GetNowDataSize() const { return dataSize - dataPos; } diff --git a/test/fuzztest/hdinnrtdevice_fuzzer/hdinnrtdevice_fuzzer.cpp b/test/fuzztest/hdinnrtdevice_fuzzer/hdinnrtdevice_fuzzer.cpp index 04a9928..9e2caf5 100644 --- a/test/fuzztest/hdinnrtdevice_fuzzer/hdinnrtdevice_fuzzer.cpp +++ b/test/fuzztest/hdinnrtdevice_fuzzer/hdinnrtdevice_fuzzer.cpp @@ -44,7 +44,7 @@ bool HdiNnrtDeviceFuzzTest(const uint8_t* data, size_t size) datas.RewindRead(0); MessageParcel reply; MessageOption option; - std::shared_ptr nnrtDeviceStub = std::make_shared(device); + OHOS::sptr nnrtDeviceStub = new V2_0::NnrtDeviceStub(device); if (nnrtDeviceStub == nullptr) { LOGE("[HdiNnrtDeviceFuzzTest]Nnrt device stub make failed."); return false; diff --git a/test/fuzztest/hdinnrtpreparedmodel_fuzzer/hdinnrtpreparedmodel_fuzzer.cpp b/test/fuzztest/hdinnrtpreparedmodel_fuzzer/hdinnrtpreparedmodel_fuzzer.cpp index 9473fa6..b02ae6e 100644 --- a/test/fuzztest/hdinnrtpreparedmodel_fuzzer/hdinnrtpreparedmodel_fuzzer.cpp +++ b/test/fuzztest/hdinnrtpreparedmodel_fuzzer/hdinnrtpreparedmodel_fuzzer.cpp @@ -44,8 +44,7 @@ bool HdiNnrtPreparedModelFuzzTest(const uint8_t* data, size_t size) datas.RewindRead(0); MessageParcel reply; MessageOption option; - std::shared_ptr preparedModelStub = - std::make_shared(preparedModel); + OHOS::sptr preparedModelStub = new V2_0::PreparedModelStub(preparedModel); if (preparedModelStub == nullptr) { LOGE("[HdiNnrtPreparedModelFuzzTest]Nnrt preparemodel stub make 
failed."); return false; -- Gitee