diff --git a/test/unittest/components/BUILD.gn b/test/unittest/components/BUILD.gn
index 777368d9fd2635bac058adb0d4af9d21beea527b..4442563e2659700b94be305fdd9951957ef58560 100644
--- a/test/unittest/components/BUILD.gn
+++ b/test/unittest/components/BUILD.gn
@@ -293,6 +293,26 @@ ohos_unittest("NNCompilerTest") {
   ]
 }
 
+ohos_unittest("NNExecutorTest") {
+  module_out_path = module_output_path
+
+  sources = [ "./nn_executor/nn_executor_test.cpp" ]
+  configs = [ ":module_private_config" ]
+
+  deps = [
+    "../../../frameworks/native/neural_network_core:libneural_network_core",
+    "../../../frameworks/native/neural_network_runtime:libneural_network_runtime",
+    "//third_party/googletest:gmock_main",
+    "//third_party/googletest:gtest_main",
+  ]
+
+  external_deps = [
+    "drivers_interface_nnrt:libnnrt_proxy_1.0",
+    "hilog:libhilog",
+    "hitrace:libhitracechain",
+  ]
+}
+
 ohos_unittest("TransformV1_0Test") {
   module_out_path = module_output_path
 
@@ -739,10 +759,6 @@ ohos_unittest("NeuralNetworkRuntimeV2_0Test") {
 group("components_unittest") {
   testonly = true
   deps = [
-    ":CompilationV1_0Test",
-    ":CompilationV2_0Test",
-    ":ExecutorV1_0Test",
-    ":ExecutorV2_0Test",
     ":HDIDeviceV1_0Test",
     ":HDIDeviceV2_0Test",
     ":HDIPreparedModelV1_0Test",
@@ -754,6 +770,7 @@ group("components_unittest") {
     ":NNBackendTest",
     ":NNCompiledCacheTest",
     ":NNCompilerTest",
+    ":NNExecutorTest",
     ":NeuralNetworkCoreV1_0Test",
     ":NeuralNetworkRuntimeV1_0Test",
     ":NeuralNetworkRuntimeV2_0Test",
diff --git a/test/unittest/components/nn_backend/nn_backend_test.cpp b/test/unittest/components/nn_backend/nn_backend_test.cpp
index dbf41ffabd5f6e8761d63ecd12c0410e8d8ef4eb..4b2ea39e4bcc451e1f14409aed104412bfe7e10f 100644
--- a/test/unittest/components/nn_backend/nn_backend_test.cpp
+++ b/test/unittest/components/nn_backend/nn_backend_test.cpp
@@ -80,6 +80,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_construct_001, TestSize.Level0)
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
     EXPECT_NE(hdiDevice, nullptr);
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -112,6 +114,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_getbackendname_002, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
 
     EXPECT_EQ(OH_NN_FAILED, hdiDevice->GetBackendName(backendName));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -133,6 +137,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_getbackendname_005, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
 
     EXPECT_EQ(OH_NN_FAILED, hdiDevice->GetBackendName(backendName));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -157,6 +163,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_getbackendname_007, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
 
     EXPECT_EQ(OH_NN_FAILED, hdiDevice->GetBackendName(backendName));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -181,6 +189,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_getbackendname_008, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
 
     EXPECT_EQ(OH_NN_SUCCESS, hdiDevice->GetBackendName(backendName));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -214,6 +224,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_getgackendtype_002, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
 
     EXPECT_EQ(OH_NN_FAILED, hdiDevice->GetBackendType(backendName));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -232,6 +244,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_getgackendtype_003, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
 
     EXPECT_EQ(OH_NN_SUCCESS, hdiDevice->GetBackendType(backendName));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -265,6 +279,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_getbackendstatus_002, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
 
     EXPECT_EQ(OH_NN_FAILED, hdiDevice->GetBackendStatus(backendName));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -283,6 +299,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_getbackendstatus_003, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
 
     EXPECT_EQ(OH_NN_SUCCESS, hdiDevice->GetBackendStatus(backendName));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -321,6 +339,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_createcompiler_002, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
 
     EXPECT_EQ(nullptr, hdiDevice->CreateCompiler(compilation));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -350,6 +370,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_destroycompiler_002, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
 
     EXPECT_EQ(OH_NN_SUCCESS, hdiDevice->DestroyCompiler(nncompiler));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -364,6 +386,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_CreateExecutor_001, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
 
     EXPECT_EQ(nullptr, hdiDevice->CreateExecutor(nullptr));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -381,6 +405,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_CreateExecutor_002, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
 
     EXPECT_EQ(nullptr, hdiDevice->CreateExecutor(compilation));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -397,6 +423,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_CreateExecutor_003, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
 
     EXPECT_EQ(nullptr, hdiDevice->CreateExecutor(compilation));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -411,6 +439,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_DestroyExecutor_001, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
 
     EXPECT_EQ(OH_NN_INVALID_PARAMETER, hdiDevice->DestroyExecutor(nullptr));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -425,6 +455,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_createtensor_001, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
 
     EXPECT_EQ(nullptr, hdiDevice->CreateTensor(nullptr));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -441,6 +473,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_createtensor_002, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
 
     EXPECT_NE(nullptr, hdiDevice->CreateTensor(tensorDesc));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -455,6 +489,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_destroytensor_001, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
 
     EXPECT_EQ(OH_NN_INVALID_PARAMETER, hdiDevice->DestroyTensor(nullptr));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -469,6 +505,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_getdevice_001, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(nullptr, backendID);
 
     EXPECT_EQ(nullptr, hdiDevice->GetDevice());
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -486,6 +524,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_getsupportedoperation_001, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(nullptr, backendID);
 
     EXPECT_EQ(OH_NN_INVALID_PARAMETER, hdiDevice->GetSupportedOperation(model, ops));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -503,6 +543,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_getsupportedoperation_002, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(nullptr, backendID);
 
     EXPECT_EQ(OH_NN_FAILED, hdiDevice->GetSupportedOperation(model, ops));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -523,6 +565,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_getsupportedoperation_003, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(nullptr, backendID);
 
     EXPECT_EQ(OH_NN_FAILED, hdiDevice->GetSupportedOperation(model, ops));
+
+    testing::Mock::AllowLeak(device.get());
 }
 
 /**
@@ -543,6 +587,8 @@ HWTEST_F(NNBackendTest, nnbackendtest_getsupportedoperation_004, TestSize.Level0)
     std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(nullptr, backendID);
 
     EXPECT_EQ(OH_NN_FAILED, hdiDevice->GetSupportedOperation(model, ops));
+
+    testing::Mock::AllowLeak(device.get());
 }
 } // namespace UnitTest
 } // namespace NeuralNetworkRuntime
diff --git a/test/unittest/components/nn_compiler/nn_compiler_test.cpp b/test/unittest/components/nn_compiler/nn_compiler_test.cpp
index 3f0cd78af7e115167a3990518efbaa211eb2ac00..33953621bf2bf9af5d283e9867121f49d6365a9a 100644
--- a/test/unittest/components/nn_compiler/nn_compiler_test.cpp
+++ b/test/unittest/components/nn_compiler/nn_compiler_test.cpp
@@ -82,6 +82,8 @@ HWTEST_F(NNCompilerTest, nncompilertest_construct_001, TestSize.Level0)
     NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
     EXPECT_NE(nullptr, nncompiler);
+
+    testing::Mock::AllowLeak(device.get());
 }
 } // namespace UnitTest
 } // namespace NeuralNetworkRuntime
diff --git a/test/unittest/components/nn_executor/nn_executor_test.cpp b/test/unittest/components/nn_executor/nn_executor_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..40e2d3b3d20a8ba25396f902480003063f78d725
--- /dev/null
+++ b/test/unittest/components/nn_executor/nn_executor_test.cpp
@@ -0,0 +1,1834 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+
+#include "nnexecutor.h"
+#include "nncompiler.h"
+#include "nnbackend.h"
+#include "device.h"
+#include "prepared_model.h"
+#include "interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h"
+#include "common/utils.h"
+#include "common/log.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class NNExecutorTest : public testing::Test {
+public:
+    NNExecutorTest() = default;
+    ~NNExecutorTest() = default;
+
+public:
+    uint32_t m_index {0};
+    const std::vector<int32_t> m_dim {3, 3};
+    const std::vector<int32_t> m_dimOut {3, 3};
+    const int32_t m_dimArry[2] {3, 3};
+    uint32_t m_dimensionCount {2};
+    float m_dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+};
+
+class MockIDevice : public Device {
+public:
+    MOCK_METHOD1(GetDeviceName, OH_NN_ReturnCode(std::string&));
+    MOCK_METHOD1(GetVendorName, OH_NN_ReturnCode(std::string&));
+    MOCK_METHOD1(GetVersion, OH_NN_ReturnCode(std::string&));
+    MOCK_METHOD1(GetDeviceType, OH_NN_ReturnCode(OH_NN_DeviceType&));
+    MOCK_METHOD1(GetDeviceStatus, OH_NN_ReturnCode(DeviceStatus&));
+    MOCK_METHOD2(GetSupportedOperation, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
+        std::vector<bool>&));
+    MOCK_METHOD1(IsFloat16PrecisionSupported, OH_NN_ReturnCode(bool&));
+    MOCK_METHOD1(IsPerformanceModeSupported, OH_NN_ReturnCode(bool&));
+    MOCK_METHOD1(IsPrioritySupported, OH_NN_ReturnCode(bool&));
+    MOCK_METHOD1(IsDynamicInputSupported, OH_NN_ReturnCode(bool&));
+    MOCK_METHOD1(IsModelCacheSupported, OH_NN_ReturnCode(bool&));
+    MOCK_METHOD3(PrepareModel, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
+        const ModelConfig&,
+        std::shared_ptr<PreparedModel>&));
+    MOCK_METHOD3(PrepareModel, OH_NN_ReturnCode(const void*,
+        const ModelConfig&,
+        std::shared_ptr<PreparedModel>&));
+    MOCK_METHOD4(PrepareModelFromModelCache, OH_NN_ReturnCode(const std::vector<Buffer>&,
+        const ModelConfig&,
+        std::shared_ptr<PreparedModel>&,
+        bool&));
+    MOCK_METHOD3(PrepareOfflineModel, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
+        const ModelConfig&,
+        std::shared_ptr<PreparedModel>&));
+    MOCK_METHOD1(AllocateBuffer, void*(size_t));
+    MOCK_METHOD2(AllocateTensorBuffer, void*(size_t, std::shared_ptr<TensorDesc>));
+    MOCK_METHOD2(AllocateTensorBuffer, void*(size_t, std::shared_ptr<NNTensor>));
+    MOCK_METHOD1(ReleaseBuffer, OH_NN_ReturnCode(const void*));
+    MOCK_METHOD2(AllocateBuffer, OH_NN_ReturnCode(size_t, int&));
+    MOCK_METHOD2(ReleaseBuffer, OH_NN_ReturnCode(int, size_t));
+};
+
+class MockIPreparedModel : public PreparedModel {
+public:
+    MOCK_METHOD1(ExportModelCache, OH_NN_ReturnCode(std::vector<Buffer>&));
+    MOCK_METHOD4(Run, OH_NN_ReturnCode(const std::vector<IOTensor>&,
+        const std::vector<IOTensor>&,
+        std::vector<std::vector<int32_t>>&,
+        std::vector<bool>&));
+    MOCK_METHOD4(Run, OH_NN_ReturnCode(const std::vector<NN_Tensor*>&,
+        const std::vector<NN_Tensor*>&,
+        std::vector<std::vector<int32_t>>&,
+        std::vector<bool>&));
+    MOCK_CONST_METHOD1(GetModelID, OH_NN_ReturnCode(uint32_t&));
+    MOCK_METHOD2(GetInputDimRanges, OH_NN_ReturnCode(std::vector<std::vector<uint32_t>>&,
+        std::vector<std::vector<uint32_t>>&));
+};
+
+class MockTensorDesc : public TensorDesc {
+public:
+    MOCK_METHOD1(GetDataType, OH_NN_ReturnCode(OH_NN_DataType*));
+    MOCK_METHOD1(SetDataType, OH_NN_ReturnCode(OH_NN_DataType));
+    MOCK_METHOD1(GetFormat, OH_NN_ReturnCode(OH_NN_Format*));
+    MOCK_METHOD1(SetFormat, OH_NN_ReturnCode(OH_NN_Format));
+    MOCK_METHOD2(GetShape, OH_NN_ReturnCode(int32_t**, size_t*));
+    MOCK_METHOD2(SetShape, OH_NN_ReturnCode(const int32_t*, size_t));
+    MOCK_METHOD1(GetElementNum, OH_NN_ReturnCode(size_t*));
+    MOCK_METHOD1(GetByteSize, OH_NN_ReturnCode(size_t*));
+    MOCK_METHOD1(SetName, OH_NN_ReturnCode(const char*));
+    MOCK_METHOD1(GetName, OH_NN_ReturnCode(const char**));
+};
+
+OH_NN_Tensor SetTensor(OH_NN_DataType dataType, uint32_t dimensionCount, const int32_t *dimensions,
+    const OH_NN_QuantParam *quantParam, OH_NN_TensorType type)
+{
+    OH_NN_Tensor tensor;
+    tensor.dataType = dataType;
+    tensor.dimensionCount = dimensionCount;
+    tensor.dimensions = dimensions;
+    tensor.quantParam = quantParam;
+    tensor.type = type;
+
+    return tensor;
+}
+
+/**
+ * @tc.name: nnexecutortest_construct_001
+ * @tc.desc: Verify the NNExecutor constructor together with CreateInputMemory and CreateOutputMemory.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_construct_001, TestSize.Level0)
+{
+    LOGE("NNExecutor nnexecutortest_construct_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+
+    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    size_t length = 9 * sizeof(float);
+    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
+        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
+
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+    EXPECT_NE(nullptr, nnExecutor);
+
+    OH_NN_Memory** memory = nullptr;
+    void* const data = dataArry;
+    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
+    OH_NN_Memory* mPtr = &memoryPtr;
+    memory = &mPtr;
+
+    OH_NN_ReturnCode retOutput = nnExecutor->CreateOutputMemory(m_index, length, memory);
+    EXPECT_EQ(OH_NN_SUCCESS, retOutput);
+    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[m_index].first))
+        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
+    OH_NN_ReturnCode retinput = nnExecutor->CreateInputMemory(m_index, length, memory);
+    EXPECT_EQ(OH_NN_SUCCESS, retinput);
+
+    delete nnExecutor;
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_getinputdimrange_001
+ * @tc.desc: Verify that GetInputDimRange returns OH_NN_OPERATION_FORBIDDEN when the prepared model fails to provide dim ranges.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_001, TestSize.Level0)
+{
+    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+
+    std::shared_ptr<PreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t index = 0;
+    size_t min = 1;
+    size_t max = 10;
+    size_t *minInputDims = &min;
+    size_t *maxInputDIms = &max;
+    size_t shapeLength = 0;
+    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_getinputdimrange_002
+ * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when minInputDims is nullptr.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_002, TestSize.Level0)
+{
+    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_002");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+
+    std::shared_ptr<PreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t index = 0;
+    size_t max = 10;
+    size_t *maxInputDIms = &max;
+    size_t shapeLength = 0;
+    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, nullptr, &maxInputDIms, &shapeLength);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_getinputdimrange_003
+ * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when maxInputDims is nullptr.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_003, TestSize.Level0)
+{
+    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_003");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+
+    std::shared_ptr<PreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t index = 0;
+    size_t min = 1;
+    size_t *minInputDims = &min;
+    size_t shapeLength = 0;
+    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, nullptr, &shapeLength);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_getinputdimrange_004
+ * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when shapeLength is nullptr.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_004, TestSize.Level0)
+{
+    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_004");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+
+    std::shared_ptr<PreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t index = 0;
+    size_t min = 1;
+    size_t max = 10;
+    size_t *minInputDims = &min;
+    size_t *maxInputDIms = &max;
+    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, nullptr);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_getinputdimrange_005
+ * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when the returned dim ranges are empty.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_005, TestSize.Level0)
+{
+    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_005");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+
+    std::shared_ptr<PreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_SUCCESS));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t index = 0;
+    size_t min = 1;
+    size_t max = 10;
+    size_t *minInputDims = &min;
+    size_t *maxInputDIms = &max;
+    size_t shapeLength = 0;
+    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_getinputdimrange_006
+ * @tc.desc: Verify that GetInputDimRange succeeds when the prepared model returns valid dim ranges.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_006, TestSize.Level0)
+{
+    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_006");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+
+    std::shared_ptr<PreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+
+    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}};
+    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
+            std::vector<std::vector<uint32_t>>& maxInputDims) {
+                // Fill the reference parameters passed in
+                minInputDims = minDims;
+                maxInputDims = maxDims;
+                return OH_NN_SUCCESS; // assumed success status code
+            }));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t index = 0;
+    size_t min = 1;
+    size_t max = 10;
+    size_t *minInputDims = &min;
+    size_t *maxInputDIms = &max;
+    size_t shapeLength = 0;
+    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_getinputdimrange_007
+ * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when the min and max dim range counts differ.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_007, TestSize.Level0)
+{
+    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_007");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+
+    std::shared_ptr<PreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+
+    std::vector<std::vector<uint32_t>> minDims = {{1, 2}, {1, 2, 3}};
+    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
+            std::vector<std::vector<uint32_t>>& maxInputDims) {
+                // Fill the reference parameters passed in
+                minInputDims = minDims;
+                maxInputDims = maxDims;
+                return OH_NN_SUCCESS; // assumed success status code
+            }));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t index = 0;
+    size_t min = 1;
+    size_t max = 10;
+    size_t *minInputDims = &min;
+    size_t *maxInputDIms = &max;
+    size_t shapeLength = 0;
+    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_getinputdimrange_008
+ * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when the min and max shape lengths differ.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_008, TestSize.Level0)
+{
+    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_008");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+
+    std::shared_ptr<PreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+
+    std::vector<std::vector<uint32_t>> minDims = {{1, 2}};
+    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
+            std::vector<std::vector<uint32_t>>& maxInputDims) {
+                // Fill the reference parameters passed in
+                minInputDims = minDims;
+                maxInputDims = maxDims;
+                return OH_NN_SUCCESS; // assumed success status code
+            }));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t index = 0;
+    size_t min = 1;
+    size_t max = 10;
+    size_t *minInputDims = &min;
+    size_t *maxInputDIms = &max;
+    size_t shapeLength = 0;
+    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_getoutputshape_001
+ * @tc.desc: Verify that GetOutputShape returns OH_NN_INVALID_PARAMETER when no output tensor descriptions exist.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_001, TestSize.Level0)
+{
+    LOGE("GetOutputShape nnexecutortest_getoutputshape_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    int32_t** dimensions = &ptr;
+    uint32_t dimensionCount = 2;
+    uint32_t* shapeNum = &dimensionCount;
+    OH_NN_ReturnCode ret = nnExecutor->GetOutputShape(m_index, dimensions, shapeNum);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_getoutputshape_002
+ * @tc.desc: Verify that GetOutputShape returns OH_NN_INVALID_PARAMETER when the output tensor description is null.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_002, TestSize.Level0)
+{
+    LOGE("GetOutputShape nnexecutortest_getoutputshape_002");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    int32_t** dimensions = &ptr;
+    uint32_t dimensionCount = 2;
+    uint32_t* shapeNum = &dimensionCount;
+    OH_NN_ReturnCode ret = nnExecutor->GetOutputShape(m_index, dimensions, shapeNum);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_getoutputshape_003
+ * @tc.desc: Verify that GetOutputShape returns OH_NN_INVALID_PARAMETER when the output tensor description has no shape set.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_003, TestSize.Level0)
+{
+    LOGE("GetOutputShape nnexecutortest_getoutputshape_003");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    pair1.first = tensorDesr;
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    int32_t** dimensions = &ptr;
+    uint32_t dimensionCount = 2;
+    uint32_t* shapeNum = &dimensionCount;
+    OH_NN_ReturnCode ret = nnExecutor->GetOutputShape(m_index, dimensions, shapeNum);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_getoutputshape_004
+ * @tc.desc: Verify that GetOutputShape succeeds for a valid output tensor description.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_004, TestSize.Level0)
+{
+    LOGE("GetOutputShape nnexecutortest_getoutputshape_004");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    int32_t expectDim2[2] = {3, 3};
+    int32_t* ptr2 = expectDim2;
+    int32_t** dimensions = &ptr2;
+    uint32_t* shapeNum = &dimensionCount;
+    *dimensions = nullptr;
+    OH_NN_ReturnCode ret = nnExecutor->GetOutputShape(m_index, dimensions, shapeNum);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_getinputnum_001
+ * @tc.desc: Verify that GetInputNum returns 0 when no input tensor descriptions exist.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_getinputnum_001, TestSize.Level0)
+{
+    LOGE("GetInputNum nnexecutortest_getinputnum_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t ret = nnExecutor->GetInputNum();
+    EXPECT_EQ(0, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_getoutputnum_001
+ * @tc.desc: Verify that GetOutputNum returns 0 when no output tensor descriptions exist.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_getoutputnum_001, TestSize.Level0)
+{
+    LOGE("GetOutputNum nnexecutortest_getoutputnum_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t ret = nnExecutor->GetOutputNum();
+    EXPECT_EQ(0, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_createinputtensordesc_001
+ * @tc.desc: Verify that CreateInputTensorDesc returns nullptr for an out-of-range index.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_createinputtensordesc_001, TestSize.Level0)
+{
+    LOGE("CreateInputTensorDesc nnexecutortest_createinputtensordesc_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t index = 1;
+    NN_TensorDesc* ret = nnExecutor->CreateInputTensorDesc(index);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_createinputtensordesc_002
+ * @tc.desc: Verify that CreateInputTensorDesc returns nullptr when the tensor description at the index is null.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_createinputtensordesc_002, TestSize.Level0)
+{
+    LOGE("CreateInputTensorDesc nnexecutortest_createinputtensordesc_002");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t index = 1;
+    NN_TensorDesc* ret = nnExecutor->CreateInputTensorDesc(index);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_createinputtensordesc_003
+ * @tc.desc: Verify that CreateInputTensorDesc returns a valid descriptor for a valid index.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_createinputtensordesc_003, TestSize.Level0)
+{
+    LOGE("CreateInputTensorDesc nnexecutortest_createinputtensordesc_003");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t index = 0;
+    NN_TensorDesc* ret = nnExecutor->CreateInputTensorDesc(index);
+    EXPECT_NE(nullptr, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_createoutputtensordesc_001
+ * @tc.desc: Verify that CreateOutputTensorDesc returns nullptr for an out-of-range index.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_createoutputtensordesc_001, TestSize.Level0)
+{
+    LOGE("CreateOutputTensorDesc nnexecutortest_createoutputtensordesc_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t index = 1;
+    NN_TensorDesc* ret = nnExecutor->CreateOutputTensorDesc(index);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_createoutputtensordesc_002
+ * @tc.desc: Verify that CreateOutputTensorDesc returns nullptr when the tensor description at the index is null.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_createoutputtensordesc_002, TestSize.Level0)
+{
+    LOGE("CreateOutputTensorDesc nnexecutortest_createoutputtensordesc_002");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t index = 1;
+    NN_TensorDesc* ret = nnExecutor->CreateOutputTensorDesc(index);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_createoutputtensordesc_003
+ * @tc.desc: Verify that CreateOutputTensorDesc returns a valid descriptor for a valid index.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_createoutputtensordesc_003, TestSize.Level0)
+{
+    LOGE("CreateOutputTensorDesc nnexecutortest_createoutputtensordesc_003");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t index = 1;
+    NN_TensorDesc* ret = nnExecutor->CreateOutputTensorDesc(index);
+    EXPECT_NE(nullptr, ret);
+}
+
+void MyOnRunDone(void *userData, OH_NN_ReturnCode errCode, void *outputTensor[], int32_t outputCount)
+{
+    LOGE("MyOnRunDone");
+    // Handle the callback here, for example:
+    if (errCode != OH_NN_SUCCESS) {
+        // Handle the error
+        LOGE("Neural network execution failed with error code: %d", errCode);
+    } else {
+        // Process the successful result via outputTensor[] and outputCount,
+        // e.g. outputTensor may point to the memory holding the network outputs
+    }
+    // If userData points to resources that need cleanup, release them here
+}
+
+/**
+ * @tc.name: nnexecutortest_setonrundone_001
+ * @tc.desc: Verify that SetOnRunDone returns OH_NN_OPERATION_FORBIDDEN.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_setonrundone_001, TestSize.Level0)
+{
+    LOGE("SetOnRunDone nnexecutortest_setonrundone_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    OH_NN_ReturnCode ret = nnExecutor->SetOnRunDone(MyOnRunDone);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+void MyOnServiceDied(void *userData)
+{
+    LOGE("MyOnServiceDied");
+}
+
+/**
+ * @tc.name: nnexecutortest_setonservicedied_001
+ * @tc.desc: Verify that SetOnServiceDied returns OH_NN_OPERATION_FORBIDDEN.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_setonservicedied_001, TestSize.Level0)
+{
+    LOGE("SetOnServiceDied nnexecutortest_setonservicedied_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    OH_NN_ReturnCode ret = nnExecutor->SetOnServiceDied(MyOnServiceDied);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_runsync_001
+ * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when the input tensors are null.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_runsync_001, TestSize.Level0)
+{
+    LOGE("RunSync nnexecutortest_runsync_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t inputSize = 1;
+    size_t outputSize = 1;
+    OH_NN_ReturnCode ret = nnExecutor->RunSync(nullptr, inputSize, nullptr, outputSize);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_runsync_002
+ * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when inputSize is 0.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_runsync_002, TestSize.Level0)
+{
+    LOGE("RunSync nnexecutortest_runsync_002");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t inputSize = 0;
+    size_t outputSize = 1;
+    OH_NN_ReturnCode ret = nnExecutor->RunSync(nullptr, inputSize, nullptr, outputSize);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_runsync_003
+ * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when querying the input dim ranges fails.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_runsync_003, TestSize.Level0)
+{
+    LOGE("RunSync nnexecutortest_runsync_003");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+
+    std::shared_ptr<PreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+
+    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}};
+    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
+            std::vector<std::vector<uint32_t>>& maxInputDims) {
+                // Fill the reference parameters passed in
+                minInputDims = minDims;
+                maxInputDims = maxDims;
+                return OH_NN_OPERATION_FORBIDDEN; // simulated failure status code
+            }));
+
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    TensorDesc desc;
+    TensorDesc* tensorDesc = &desc;
+
+    std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
+    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(hdiDevice->CreateTensor(tensorDesc));
+
+    size_t inputSize = 2;
+    size_t outputSize = 2;
+    OH_NN_ReturnCode ret = nnExecutor->RunSync(&tensor, inputSize, &tensor, outputSize);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_runsync_004
+ * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when the input shape does not match the returned dim ranges.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_runsync_004, TestSize.Level0)
+{
+    LOGE("RunSync nnexecutortest_runsync_004");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+
+    std::shared_ptr<PreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+
+    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}};
+    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
+            std::vector<std::vector<uint32_t>>& maxInputDims) {
+                // Fill the reference parameters passed in
+                minInputDims = minDims;
+                maxInputDims = maxDims;
+                return OH_NN_SUCCESS; // assumed success status code
+            }));
+
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    TensorDesc desc;
+    TensorDesc* tensorDesc = &desc;
+
+    std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
+    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(hdiDevice->CreateTensor(tensorDesc));
+
+    size_t inputSize = 2;
+    size_t outputSize = 2;
+    OH_NN_ReturnCode ret = nnExecutor->RunSync(&tensor, inputSize, &tensor, outputSize);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_runsync_005
+ * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when the min and max dim range counts differ.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_runsync_005, TestSize.Level0)
+{
+    LOGE("RunSync nnexecutortest_runsync_005");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+
+    std::shared_ptr<PreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+
+    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}, {1, 2, 3}};
+    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
+            std::vector<std::vector<uint32_t>>& maxInputDims) {
+                // Fill the reference parameters passed in
+                minInputDims = minDims;
+                maxInputDims = maxDims;
+                return OH_NN_SUCCESS; // assumed success status code
+            }));
+
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    TensorDesc desc;
+    TensorDesc* tensorDesc = &desc;
+
+    std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
+    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(hdiDevice->CreateTensor(tensorDesc));
+
+    size_t inputSize = 2;
+    size_t outputSize = 2;
+    OH_NN_ReturnCode ret = nnExecutor->RunSync(&tensor, inputSize, &tensor, outputSize);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_runasync_001
+ * @tc.desc: Verify that RunAsync returns OH_NN_OPERATION_FORBIDDEN when the input tensors are null.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_runasync_001, TestSize.Level0)
+{
+    LOGE("RunAsync nnexecutortest_runasync_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    void* buffer = m_dataArry;
+    size_t inputSize = 1;
+    size_t outputSize = 1;
+    int32_t timeout = 10;
+    OH_NN_ReturnCode ret = nnExecutor->RunAsync(nullptr, inputSize, nullptr, outputSize, timeout, buffer);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_runasync_002
+ * @tc.desc: Verify that RunAsync returns OH_NN_OPERATION_FORBIDDEN when inputSize is 0.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_runasync_002, TestSize.Level0)
+{
+    LOGE("RunAsync nnexecutortest_runasync_002");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    void* buffer = m_dataArry;
+    size_t inputSize = 0;
+    size_t outputSize = 1;
+    int32_t timeout = 10;
+    OH_NN_ReturnCode ret = nnExecutor->RunAsync(nullptr, inputSize, nullptr, outputSize, timeout, buffer);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_runasync_003
+ * @tc.desc: Verify that RunAsync returns OH_NN_OPERATION_FORBIDDEN when querying the input dim ranges fails.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_runasync_003, TestSize.Level0)
+{
+    LOGE("RunAsync nnexecutortest_runasync_003");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+
+    std::shared_ptr<PreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+
+    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}};
+    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
+            std::vector<std::vector<uint32_t>>& maxInputDims) {
+                // Fill the reference parameters passed in
+                minInputDims = minDims;
+                maxInputDims = maxDims;
+                return OH_NN_OPERATION_FORBIDDEN; // simulated failure status code
+            }));
+
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    TensorDesc desc;
+    TensorDesc* tensorDesc = &desc;
+
+    std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
+    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(hdiDevice->CreateTensor(tensorDesc));
+
+    void* buffer = m_dataArry;
+    size_t inputSize = 2;
+    size_t outputSize = 2;
+    int32_t timeout = 10;
+    OH_NN_ReturnCode ret = nnExecutor->RunAsync(&tensor, inputSize, &tensor, outputSize, timeout, buffer);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_getbackendid_001
+ * @tc.desc: Verify that GetBackendID returns the backend ID passed to the constructor.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_getbackendid_001, TestSize.Level0)
+{
+    LOGE("GetBackendID nnexecutortest_getbackendid_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t ret = nnExecutor->GetBackendID();
+    EXPECT_EQ(0, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_setinput_001
+ * @tc.desc: Verify that SetInput returns OH_NN_FAILED when querying the input dim ranges fails.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_setinput_001, TestSize.Level0)
+{
+    LOGE("SetInput nnexecutortest_setinput_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
+    void* buffer = m_dataArry;
+    size_t length = 9 * sizeof(float);
+
+    OH_NN_ReturnCode ret = nnExecutor->SetInput(m_index, tensor, buffer, length);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_setinput_002
+ * @tc.desc: Verify that SetInput returns OH_NN_INVALID_PARAMETER when the input tensor description is null.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_setinput_002, TestSize.Level0)
+{
+    LOGE("SetInput nnexecutortest_setinput_002");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_OPERATION_FORBIDDEN));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
+    void* buffer = m_dataArry;
+    size_t length = 9 * sizeof(float);
+
+    OH_NN_ReturnCode ret = nnExecutor->SetInput(m_index, tensor, buffer, length);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_setinput_003
+ * @tc.desc: Verify that SetInput returns OH_NN_INVALID_PARAMETER when the executor has no device to allocate the input buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_setinput_003, TestSize.Level0)
+{
+    LOGE("SetInput nnexecutortest_setinput_003");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_OPERATION_FORBIDDEN));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
+    void* buffer = m_dataArry;
+    size_t length = 9 * sizeof(float);
+
+    OH_NN_ReturnCode ret = nnExecutor->SetInput(m_index, tensor, buffer, length);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_setinputfrommemory_001
+ * @tc.desc: Verify that SetInputFromMemory returns OH_NN_FAILED when the dim range query fails and no tensor descriptions exist.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_001, TestSize.Level0)
+{
+    LOGE("SetInputFromMemory nnexecutortest_setinputfrommemory_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
+    void* const data = m_dataArry;
+    OH_NN_Memory memory = {data, 9 * sizeof(float)};
+
+    OH_NN_ReturnCode ret = nnExecutor->SetInputFromMemory(m_index, tensor, memory);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_setinputfrommemory_002
+ * @tc.desc: Verify that SetInputFromMemory returns OH_NN_FAILED when the dim range query fails and the tensor descriptions are null.
+/**
+ * @tc.name: nnexecutortest_setinputfrommemory_002
+ * @tc.desc: Verify that SetInputFromMemory returns OH_NN_FAILED when GetInputDimRanges fails and the cached input tensor descriptions hold null TensorDescs.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_002, TestSize.Level0)
+{
+    LOGE("SetInputFromMemory nnexecutortest_setinputfrommemory_002");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
+    void* const data = m_dataArry;
+    OH_NN_Memory memory = {data, 9 * sizeof(float)};
+
+    OH_NN_ReturnCode ret = nnExecutor->SetInputFromMemory(m_index, tensor, memory);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_setinputfrommemory_003
+ * @tc.desc: Verify that SetInputFromMemory returns OH_NN_FAILED when GetInputDimRanges fails even with fully shaped input tensor descriptions.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_003, TestSize.Level0)
+{
+    LOGE("SetInputFromMemory nnexecutortest_setinputfrommemory_003");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
+    void* const data = m_dataArry;
+    OH_NN_Memory memory = {data, 9 * sizeof(float)};
+
+    OH_NN_ReturnCode ret = nnExecutor->SetInputFromMemory(m_index, tensor, memory);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
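+/*
+ * The {3, 3} shape set on the shared TensorDesc matches the test buffer exactly:
+ * 3 * 3 elements * sizeof(float) = 36 bytes = 9 * sizeof(float), the length handed
+ * to SetInput/SetInputFromMemory. Any failure reported above should therefore stem
+ * from the stubbed prepared model, not from a size mismatch.
+ */
+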
+/**
+ * @tc.name: nnexecutortest_setoutput_001
+ * @tc.desc: Verify that SetOutput returns OH_NN_INVALID_PARAMETER when the executor holds no output tensor descriptions.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_setoutput_001, TestSize.Level0)
+{
+    LOGE("SetOutput nnexecutortest_setoutput_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t length = 9 * sizeof(float);
+    void* buffer = m_dataArry;
+
+    OH_NN_ReturnCode ret = nnExecutor->SetOutput(m_index, buffer, length);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_setoutput_002
+ * @tc.desc: Verify that SetOutput returns OH_NN_INVALID_PARAMETER when the output tensor descriptions hold null TensorDescs.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_setoutput_002, TestSize.Level0)
+{
+    LOGE("SetOutput nnexecutortest_setoutput_002");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t length = 9 * sizeof(float);
+    void* buffer = m_dataArry;
+
+    OH_NN_ReturnCode ret = nnExecutor->SetOutput(m_index, buffer, length);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_setoutput_003
+ * @tc.desc: Verify that SetOutput returns OH_NN_INVALID_PARAMETER when the executor has neither a device nor a prepared model, even with shaped output descriptions.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_setoutput_003, TestSize.Level0)
+{
+    LOGE("SetOutput nnexecutortest_setoutput_003");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t length = 9 * sizeof(float);
+    void* buffer = m_dataArry;
+
+    OH_NN_ReturnCode ret = nnExecutor->SetOutput(m_index, buffer, length);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
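+/*
+ * All three SetOutput cases construct the executor with a null device and a null
+ * prepared model, so OH_NN_INVALID_PARAMETER is expected whether the output tensor
+ * descriptions are absent, hold null TensorDescs, or are fully shaped.
+ */
+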
+/**
+ * @tc.name: nnexecutortest_setoutputfrommemory_001
+ * @tc.desc: Verify that SetOutputFromMemory returns OH_NN_INVALID_PARAMETER when the executor holds no output tensor descriptions.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_setoutputfrommemory_001, TestSize.Level0)
+{
+    LOGE("SetOutputFromMemory nnexecutortest_setoutputfrommemory_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    void* const data = m_dataArry;
+    OH_NN_Memory memory = {data, 9 * sizeof(float)};
+
+    OH_NN_ReturnCode ret = nnExecutor->SetOutputFromMemory(m_index, memory);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_setoutputfrommemory_002
+ * @tc.desc: Verify that SetOutputFromMemory returns OH_NN_INVALID_PARAMETER when the output tensor descriptions hold null TensorDescs.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_setoutputfrommemory_002, TestSize.Level0)
+{
+    LOGE("SetOutputFromMemory nnexecutortest_setoutputfrommemory_002");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    void* const data = m_dataArry;
+    OH_NN_Memory memory = {data, 9 * sizeof(float)};
+
+    OH_NN_ReturnCode ret = nnExecutor->SetOutputFromMemory(m_index, memory);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_setoutputfrommemory_003
+ * @tc.desc: Verify that SetOutputFromMemory returns OH_NN_INVALID_PARAMETER when the executor has neither a device nor a prepared model, even with shaped output descriptions.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_setoutputfrommemory_003, TestSize.Level0)
+{
+    LOGE("SetOutputFromMemory nnexecutortest_setoutputfrommemory_003");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    void* const data = m_dataArry;
+    OH_NN_Memory memory = {data, 9 * sizeof(float)};
+
+    OH_NN_ReturnCode ret = nnExecutor->SetOutputFromMemory(m_index, memory);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_createinputmemory_001
+ * @tc.desc: Verify that CreateInputMemory returns OH_NN_INVALID_PARAMETER when the executor has no device.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_001, TestSize.Level0)
+{
+    LOGE("CreateInputMemory nnexecutortest_createinputmemory_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    OH_NN_Memory** memory = nullptr;
+    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* const data = dataArry;
+    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
+    OH_NN_Memory* ptr = &memoryPtr;
+    memory = &ptr;
+    size_t length = 9 * sizeof(float);
+
+    OH_NN_ReturnCode ret = nnExecutor->CreateInputMemory(m_index, length, memory);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_createinputmemory_002
+ * @tc.desc: Verify that CreateInputMemory returns OH_NN_INVALID_PARAMETER when the input tensor descriptions hold null TensorDescs and no device is set.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_002, TestSize.Level0)
+{
+    LOGE("CreateInputMemory nnexecutortest_createinputmemory_002");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    OH_NN_Memory** memory = nullptr;
+    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* const data = dataArry;
+    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
+    OH_NN_Memory* ptr = &memoryPtr;
+    memory = &ptr;
+    size_t length = 9 * sizeof(float);
+
+    OH_NN_ReturnCode ret = nnExecutor->CreateInputMemory(m_index, length, memory);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
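+/*
+ * Like the SetOutput cases, the two CreateInputMemory cases above pass a null
+ * device, so the call is expected to be rejected with OH_NN_INVALID_PARAMETER
+ * without any allocation being attempted.
+ */
+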
+/**
+ * @tc.name: nnexecutortest_createinputmemory_003
+ * @tc.desc: Verify that CreateInputMemory returns OH_NN_MEMORY_ERROR when the device fails to allocate the tensor buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_003, TestSize.Level0)
+{
+    LOGE("CreateInputMemory nnexecutortest_createinputmemory_003");
+    size_t m_backendID {0};
+    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
+
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+
+    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    size_t length = 9 * sizeof(float);
+    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[m_index].first))
+        .WillRepeatedly(::testing::Return(nullptr));
+
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    OH_NN_Memory** memory = nullptr;
+    void* const data = dataArry;
+    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
+    OH_NN_Memory* mPtr = &memoryPtr;
+    memory = &mPtr;
+
+    OH_NN_ReturnCode ret = nnExecutor->CreateInputMemory(m_index, length, memory);
+    EXPECT_EQ(OH_NN_MEMORY_ERROR, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_createinputmemory_004
+ * @tc.desc: Verify that CreateInputMemory returns OH_NN_SUCCESS when the device allocates the tensor buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_004, TestSize.Level0)
+{
+    LOGE("CreateInputMemory nnexecutortest_createinputmemory_004");
+    size_t m_backendID {0};
+    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
+
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+
+    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    size_t length = 9 * sizeof(float);
+    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[m_index].first))
+        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
+
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    OH_NN_Memory** memory = nullptr;
+    void* const data = dataArry;
+    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
+    OH_NN_Memory* mPtr = &memoryPtr;
+    memory = &mPtr;
+
+    OH_NN_ReturnCode ret = nnExecutor->CreateInputMemory(m_index, length, memory);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
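+/*
+ * The two device-backed cases cover both allocator outcomes: AllocateTensorBuffer
+ * returning nullptr surfaces as OH_NN_MEMORY_ERROR, while a non-null dummy pointer
+ * (0x1000, never dereferenced by the test) lets CreateInputMemory succeed and
+ * publish an OH_NN_Memory wrapper through the out-parameter.
+ */
+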
+/**
+ * @tc.name: nnexecutortest_destroyinputmemory_001
+ * @tc.desc: Verify that DestroyInputMemory returns OH_NN_INVALID_PARAMETER when no input memory was successfully created.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_destroyinputmemory_001, TestSize.Level0)
+{
+    LOGE("DestroyInputMemory nnexecutortest_destroyinputmemory_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t length = 9 * sizeof(float);
+    OH_NN_Memory** memory = nullptr;
+    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* const data = dataArry;
+    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
+    OH_NN_Memory* ptr = &memoryPtr;
+    memory = &ptr;
+
+    nnExecutor->CreateInputMemory(m_index, length, memory);
+    OH_NN_ReturnCode ret = nnExecutor->DestroyInputMemory(m_index, memory);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nnexecutortest_destroyinputmemory_002
+ * @tc.desc: Verify that DestroyInputMemory returns OH_NN_INVALID_PARAMETER when the input tensor descriptions hold null TensorDescs and creation failed.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_destroyinputmemory_002, TestSize.Level0)
+{
+    LOGE("DestroyInputMemory nnexecutortest_destroyinputmemory_002");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t length = 9 * sizeof(float);
+    OH_NN_Memory** memory = nullptr;
+    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* const data = dataArry;
+    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
+    OH_NN_Memory* ptr = &memoryPtr;
+    memory = &ptr;
+
+    nnExecutor->CreateInputMemory(m_index, length, memory);
+    OH_NN_ReturnCode ret = nnExecutor->DestroyInputMemory(m_index, memory);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
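+/*
+ * In both cases above the preceding CreateInputMemory call fails (no device), so
+ * no memory is registered for m_index and DestroyInputMemory is expected to
+ * return OH_NN_INVALID_PARAMETER.
+ */
+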
+/**
+ * @tc.name: nnexecutortest_destroyinputmemory_003
+ * @tc.desc: Verify that DestroyInputMemory returns OH_NN_SUCCESS for memory previously created via CreateInputMemory.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_destroyinputmemory_003, TestSize.Level0)
+{
+    LOGE("DestroyInputMemory nnexecutortest_destroyinputmemory_003");
+    size_t m_backendID {0};
+    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+
+    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    size_t length = 9 * sizeof(float);
+    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[m_index].first))
+        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    OH_NN_Memory** memory = nullptr;
+    void* const data = dataArry;
+    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
+    OH_NN_Memory* mPtr = &memoryPtr;
+    memory = &mPtr;
+
+    nnExecutor->CreateInputMemory(m_index, length, memory);
+    OH_NN_ReturnCode ret = nnExecutor->DestroyInputMemory(m_index, memory);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file