diff --git a/bundle.json b/bundle.json index 06ee22904e6c1691f5b05235fe132cab1cd5e78b..f95ed56f43d37ce745d2a0a1642455b324bbe3d5 100644 --- a/bundle.json +++ b/bundle.json @@ -41,9 +41,6 @@ "header_base":"//foundation/ai/neural_network_runtime/interfaces/innerkits/c" } } - ], - "test": [ - "//foundation/ai/neural_network_runtime:nnrt_test_target" ] } } diff --git a/frameworks/BUILD.gn b/frameworks/BUILD.gn index 321fb2f9e1c5e4c8319eca89ca275ef9dd641ad6..f6775126f892a939bce110e6cd9a1f0255c78e3a 100644 --- a/frameworks/BUILD.gn +++ b/frameworks/BUILD.gn @@ -23,8 +23,12 @@ nnrt_sources = [ "native/device_registrar.cpp", "native/execution_plan.cpp", "native/executor.cpp", - "native/hdi_device.cpp", - "native/hdi_prepared_model.cpp", + "native/device_discover_v1_0.cpp", + "native/device_discover_v2_0.cpp", + "native/hdi_device_v1_0.cpp", + "native/hdi_device_v2_0.cpp", + "native/hdi_prepared_model_v1_0.cpp", + "native/hdi_prepared_model_v2_0.cpp", "native/inner_model.cpp", "native/memory_manager.cpp", "native/neural_network_runtime.cpp", @@ -122,6 +126,7 @@ ohos_shared_library("libneural_network_runtime") { external_deps = [ "c_utils:utils", "drivers_interface_nnrt:libnnrt_proxy_1.0", + "drivers_interface_nnrt:libnnrt_proxy_2.0", "hdf_core:libhdf_utils", "hilog_native:libhilog", "hitrace_native:libhitracechain", diff --git a/frameworks/native/device.h b/frameworks/native/device.h index 93415e4bb527e26049cba0563cc3e86bcfa4b148..c34e0432d139de3c9d54934b8f9f2a10620746d8 100644 --- a/frameworks/native/device.h +++ b/frameworks/native/device.h @@ -34,6 +34,7 @@ public: virtual OH_NN_ReturnCode GetDeviceName(std::string& name) = 0; virtual OH_NN_ReturnCode GetVendorName(std::string& name) = 0; + virtual OH_NN_ReturnCode GetVersion(std::string& version) = 0; virtual OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) = 0; virtual OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) = 0; virtual OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, diff --git 
a/frameworks/native/hdi_interfaces.h b/frameworks/native/device_discover.h similarity index 61% rename from frameworks/native/hdi_interfaces.h rename to frameworks/native/device_discover.h index 1d3416ba6f9daff3cd10c3a5ea5bfa2bd02315d4..fd86f1fa069076f6f832ecab8c1d1d27ac7a87e7 100644 --- a/frameworks/native/hdi_interfaces.h +++ b/frameworks/native/device_discover.h @@ -13,17 +13,19 @@ * limitations under the License. */ -#ifndef NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H -#define NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H +#ifndef NEURAL_NETWORK_RUNTIME_DEVICE_DISCOVER_H +#define NEURAL_NETWORK_RUNTIME_DEVICE_DISCOVER_H -#include -#include -#include +#include +#include + +#include "device.h" namespace OHOS { namespace NeuralNetworkRuntime { -namespace V1_0 = OHOS::HDI::Nnrt::V1_0; +std::shared_ptr DiscoverHDIDevicesV1_0(std::string& deviceName, std::string& vendorName, std::string& version); +std::shared_ptr DiscoverHDIDevicesV2_0(std::string& deviceName, std::string& vendorName, std::string& version); + } // namespace NeuralNetworkRuntime } // namespace OHOS - -#endif // NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H \ No newline at end of file +#endif // NEURAL_NETWORK_RUNTIME_DEVICE_DISCOVER_H \ No newline at end of file diff --git a/frameworks/native/device_discover_v1_0.cpp b/frameworks/native/device_discover_v1_0.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e5f802e9d086d61d545320bcb490e7c15f7058d2 --- /dev/null +++ b/frameworks/native/device_discover_v1_0.cpp @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "device_discover.h" +#include "hdi_device_v1_0.h" +#include "common/log.h" +#include "common/utils.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +std::shared_ptr DiscoverHDIDevicesV1_0(std::string& deviceName, std::string& vendorName, std::string& version) +{ + // only one device from HDI now. + OHOS::sptr iDevice = V1_0::INnrtDevice::Get(); + if (iDevice == nullptr) { + LOGW("Get HDI device failed."); + return nullptr; + } + + auto hdiRet = iDevice->GetDeviceName(deviceName); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get device name failed. ErrorCode=%d", hdiRet); + return nullptr; + } + hdiRet = iDevice->GetVendorName(vendorName); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get vendor name failed. ErrorCode=%d", hdiRet); + return nullptr; + } + std::pair hdiVersion; + hdiRet = iDevice->GetVersion(hdiVersion.first, hdiVersion.second); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get version failed. 
ErrorCode=%d", hdiRet); + return nullptr; + } + version = 'v' + std::to_string(hdiVersion.first) + '_' + std::to_string(hdiVersion.second); + + std::shared_ptr device = CreateSharedPtr(iDevice); + if (device == nullptr) { + LOGW("Failed to register device, because fail to create device instance."); + } + return device; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/device_discover_v2_0.cpp b/frameworks/native/device_discover_v2_0.cpp new file mode 100644 index 0000000000000000000000000000000000000000..de5e8b792aa8c3cf088a9195cabf61808589b52e --- /dev/null +++ b/frameworks/native/device_discover_v2_0.cpp @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "device_discover.h" +#include "hdi_device_v2_0.h" +#include "common/log.h" +#include "common/utils.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +std::shared_ptr DiscoverHDIDevicesV2_0(std::string& deviceName, std::string& vendorName, std::string& version) +{ + // only one device from HDI now. + OHOS::sptr iDevice = V2_0::INnrtDevice::Get(); + if (iDevice == nullptr) { + LOGW("Get HDI device failed."); + return nullptr; + } + + auto hdiRet = iDevice->GetDeviceName(deviceName); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get device name failed. 
ErrorCode=%d", hdiRet); + return nullptr; + } + hdiRet = iDevice->GetVendorName(vendorName); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get vendor name failed. ErrorCode=%d", hdiRet); + return nullptr; + } + std::pair hdiVersion; + hdiRet = iDevice->GetVersion(hdiVersion.first, hdiVersion.second); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get version failed. ErrorCode=%d", hdiRet); + return nullptr; + } + version = 'v' + std::to_string(hdiVersion.first) + '_' + std::to_string(hdiVersion.second); + + std::shared_ptr device = CreateSharedPtr(iDevice); + if (device == nullptr) { + LOGW("Failed to register device, because fail to create device instance."); + } + return device; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/device_manager.cpp b/frameworks/native/device_manager.cpp index 6ad79bbc5ffe954305079386c3e6c42c1dcacd85..75ac674f3e2dd3ec9e7ae9bc0ab2e96b79bcfde9 100644 --- a/frameworks/native/device_manager.cpp +++ b/frameworks/native/device_manager.cpp @@ -14,9 +14,8 @@ */ #include "device_manager.h" +#include "device_discover.h" -#include "hdi_interfaces.h" -#include "hdi_device.h" #include "common/log.h" #include "common/utils.h" @@ -70,13 +69,21 @@ const std::string& DeviceManager::GetDeviceName(size_t deviceId) return m_tmpDeviceName; } - m_tmpDeviceName = GenUniqueName(deviceName, vendorName); + std::string version; + ret = iter->second->GetVersion(version); + if (ret != OH_NN_SUCCESS) { + LOGE("Get version failed."); + return m_tmpDeviceName; + } + + m_tmpDeviceName = GenUniqueName(deviceName, vendorName, version); return m_tmpDeviceName; } -std::string DeviceManager::GenUniqueName(const std::string& deviceName, const std::string& vendorName) const +std::string DeviceManager::GenUniqueName( + const std::string& deviceName, const std::string& vendorName, const std::string& version) const { - return deviceName + "_" + vendorName; + return deviceName + "_" + vendorName + "_" + version; } 
OH_NN_ReturnCode DeviceManager::RegisterDevice(std::function()> creator) @@ -106,8 +113,15 @@ OH_NN_ReturnCode DeviceManager::RegisterDevice(std::functionGetVersion(version); + if (ret != OH_NN_SUCCESS) { + LOGE("Get version failed."); + return ret; + } + const std::lock_guard lock(m_mtx); - std::string uniqueName = GenUniqueName(deviceName, vendorName); + std::string uniqueName = GenUniqueName(deviceName, vendorName, version); auto setResult = m_uniqueName.emplace(uniqueName); if (!setResult.second) { LOGE("Device already exists, cannot register again. deviceName=%s, vendorName=%s", @@ -119,29 +133,10 @@ OH_NN_ReturnCode DeviceManager::RegisterDevice(std::function device) { - // only one device from HDI now. - OHOS::sptr iDevice = V1_0::INnrtDevice::Get(); - if (iDevice == nullptr) { - LOGW("Get HDI device failed."); - return; - } - - std::string deviceName; - std::string vendorName; - auto hdiRet = iDevice->GetDeviceName(deviceName); - if (hdiRet != HDF_SUCCESS) { - LOGW("Get device name failed. ErrorCode=%d", hdiRet); - return; - } - hdiRet = iDevice->GetVendorName(vendorName); - if (hdiRet != HDF_SUCCESS) { - LOGW("Get vendor name failed. 
ErrorCode=%d", hdiRet); - return; - } - - std::string uniqueName = GenUniqueName(deviceName, vendorName); + std::string uniqueName = GenUniqueName(deviceName, vendorName, version); const std::lock_guard lock(m_mtx); auto setResult = m_uniqueName.emplace(uniqueName); if (!setResult.second) { @@ -150,14 +145,25 @@ void DeviceManager::DiscoverHDIDevices() return; } - std::shared_ptr device = CreateSharedPtr(iDevice); - if (device == nullptr) { - LOGW("Failed to register device, because fail to create device instance."); - return; - } m_devices.emplace(std::hash{}(uniqueName), device); } +void DeviceManager::DiscoverHDIDevices() +{ + std::string deviceName; + std::string vendorName; + std::string version; + std::shared_ptr deviceV1_0 = DiscoverHDIDevicesV1_0(deviceName, vendorName, version); + if (deviceV1_0 != nullptr) { + AddDevice(deviceName, vendorName, version, deviceV1_0); + } + + std::shared_ptr deviceV2_0 = DiscoverHDIDevicesV2_0(deviceName, vendorName, version); + if (deviceV2_0 != nullptr) { + AddDevice(deviceName, vendorName, version, deviceV2_0); + } +} + bool DeviceManager::IsValidDevice(std::shared_ptr device) const { DeviceStatus status {DeviceStatus::UNKNOWN}; diff --git a/frameworks/native/device_manager.h b/frameworks/native/device_manager.h index 20d4bf05834c6cbac02191ffa1f743730ebbfb9e..4d8b9fbd35e1c029c9c8ab46c6a620ce12c63ed6 100644 --- a/frameworks/native/device_manager.h +++ b/frameworks/native/device_manager.h @@ -49,8 +49,11 @@ private: DeviceManager(const DeviceManager&) = delete; DeviceManager& operator=(const DeviceManager&) = delete; + void AddDevice(const std::string& deviceName, const std::string& vendorName, + const std::string& version, std::shared_ptr device); void DiscoverHDIDevices(); - std::string GenUniqueName(const std::string& deviceName, const std::string& vendorName) const; + std::string GenUniqueName( + const std::string& deviceName, const std::string& vendorName, const std::string& version) const; bool 
IsValidDevice(std::shared_ptr device) const; private: diff --git a/frameworks/native/device_registrar.h b/frameworks/native/device_registrar.h index a9645299821087407c45202087110eca1b8365ea..521a075a336db601d839d10eafe5ca1455c0ca34 100644 --- a/frameworks/native/device_registrar.h +++ b/frameworks/native/device_registrar.h @@ -34,7 +34,7 @@ public: #define REGISTER_DEVICE(deviceName, vendorName, creator) \ namespace { \ - static OHOS::NeuralNetworkRuntime::DeviceRegistrar g_##deviceName##_##vendorName##_device_registrar(creator) \ + static OHOS::NeuralNetworkRuntime::DeviceRegistrar g_##deviceName##_##vendorName##_device_registrar(creator); \ } // namespace } // namespace NeuralNetworkRuntime } // OHOS diff --git a/frameworks/native/execution_plan.cpp b/frameworks/native/execution_plan.cpp index b1ddfe3ac53676fb016bd9aeebd9bd1deaabfd01..5199199bfd9cf4c11a8d0c58cddcfc2108743ead 100644 --- a/frameworks/native/execution_plan.cpp +++ b/frameworks/native/execution_plan.cpp @@ -23,6 +23,18 @@ namespace OHOS { namespace NeuralNetworkRuntime { +OH_NN_ReturnCode ExecutionPlan::GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims) +{ + OH_NN_ReturnCode ret = m_preparedModel->GetInputDimRanges(minInputDims, maxInputDims); + if (ret != OH_NN_SUCCESS) { + LOGE("ExecutionPlan GetInputDimRanges() failed."); + return ret; + } + + return OH_NN_SUCCESS; +} + OH_NN_ReturnCode ExecutionPlan::Run(const std::vector>& inputTensors, std::vector>& outputTensors) { diff --git a/frameworks/native/execution_plan.h b/frameworks/native/execution_plan.h index 9644a321d12b0120f24af6835e7eb035548c7445..54f4648b21372a1a23b6a9d7bef5dd0ec023e36b 100644 --- a/frameworks/native/execution_plan.h +++ b/frameworks/native/execution_plan.h @@ -29,6 +29,9 @@ public: ExecutionPlan(std::shared_ptr preparedModel, std::shared_ptr device) : m_preparedModel(preparedModel), m_device(device) {}; + + OH_NN_ReturnCode GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims); 
OH_NN_ReturnCode Run(const std::vector>& inputTensors, std::vector>& outputTensors); diff --git a/frameworks/native/executor.cpp b/frameworks/native/executor.cpp index f99d28cdb1869484968217fb5b03c927e6e7b3f7..93670eb0f715e6640e3e06588d034638a3404135 100644 --- a/frameworks/native/executor.cpp +++ b/frameworks/native/executor.cpp @@ -19,6 +19,7 @@ #include "common/utils.h" #include "common/scoped_trace.h" +#include "transform.h" namespace OHOS { @@ -113,8 +114,64 @@ void Executor::SetInputTensorWithNewBuffer(uint32_t index, } +OH_NN_ReturnCode Executor::CheckInputDimRanges(uint32_t index, const OH_NN_Tensor& nnTensor) const +{ + std::vector> minInputDims; + std::vector> maxInputDims; + auto ret = m_executionPlan->GetInputDimRanges(minInputDims, maxInputDims); + if (ret != OH_NN_SUCCESS) { + LOGE("Get the dimension ranges of input %u failed. ErrorCode=%d", index, ret); + return ret; + } + + if (index >= minInputDims.size()) { + LOGE("index is %u, which exceeds the size of minInputDims:%zu.", index, minInputDims.size()); + return OH_NN_INVALID_PARAMETER; + } + + if (index >= maxInputDims.size()) { + LOGE("index is %u, which exceeds the size of maxInputDims:%zu.", index, maxInputDims.size()); + return OH_NN_INVALID_PARAMETER; + } + + const std::vector& minSingleInputDims = minInputDims[index]; + const std::vector& maxSingleInputDims = maxInputDims[index]; + + std::vector tensorShape = ConstructVectorFromArray(nnTensor.dimensions, nnTensor.dimensionCount); + size_t tensorShapeSize = tensorShape.size(); + if (minSingleInputDims.size() != tensorShapeSize || maxSingleInputDims.size() != tensorShapeSize) { + LOGE("Size of minSingleInputDims, maxSingleInputDims and tensorShape of input %u are not equal.", index); + return OH_NN_INVALID_PARAMETER; + } + + for (size_t j = 0; j < tensorShapeSize; ++j) { + // Dimensions cannot be negative + if (tensorShape[j] < 0) { + LOGE("Dimension %zu of input %u is %d.", j, index, tensorShape[j]); + return OH_NN_INVALID_PARAMETER; + } + 
uint32_t dim = static_cast(tensorShape[j]); + if (dim < minSingleInputDims[j] || dim > maxSingleInputDims[j]) { + LOGE("Dimension %zu of input %u is %u, which is out of range [%u, %u]", + j, index, dim, minSingleInputDims[j], maxSingleInputDims[j]); + return OH_NN_INVALID_PARAMETER; + } + } + + return OH_NN_SUCCESS; +} + + OH_NN_ReturnCode Executor::SetInput(uint32_t index, const OH_NN_Tensor& nnTensor, const void* buffer, size_t length) { + auto nnRet = CheckInputDimRanges(index, nnTensor); + if (nnRet == OH_NN_OPERATION_FORBIDDEN) { + LOGI("Skip input dimension bounds check."); + } else if (nnRet != OH_NN_SUCCESS) { + LOGE("SetInput failed, Check the range of the %uth input dimension ranges failed.", index); + return nnRet; + } + std::shared_ptr inputTensor = CreateSharedPtr(); if (inputTensor == nullptr) { LOGE("SetInput failed, error happened when creating NNTensor."); @@ -181,6 +238,14 @@ OH_NN_ReturnCode Executor::SetInput(uint32_t index, const OH_NN_Tensor& nnTensor OH_NN_ReturnCode Executor::SetInputFromMemory(uint32_t index, const OH_NN_Tensor& nnTensor, const OH_NN_Memory& memory) { + auto nnRet = CheckInputDimRanges(index, nnTensor); + if (nnRet == OH_NN_OPERATION_FORBIDDEN) { + LOGI("Skip input dimension bounds check."); + } else if (nnRet != OH_NN_SUCCESS) { + LOGE("SetInputFromMemory failed, Check the range of the %uth input dimension ranges failed.", index); + return nnRet; + } + // Build a input tensor std::shared_ptr inputTensor = CreateSharedPtr(); if (inputTensor == nullptr) { diff --git a/frameworks/native/executor.h b/frameworks/native/executor.h index f7a98eb094f35c4235e8f04b5d92e9746f7dff63..c7b2061e911800cef2efcf80bbec771e9b50b6dd 100644 --- a/frameworks/native/executor.h +++ b/frameworks/native/executor.h @@ -49,6 +49,7 @@ private: const void* buffer, size_t dataLength, size_t curBufferLength); void SetInputTensorWithNewBuffer(uint32_t index, std::shared_ptr inputTensor, const void* inputBuffer, size_t length, bool isInnerMem); + 
OH_NN_ReturnCode CheckInputDimRanges(uint32_t index, const OH_NN_Tensor& nnTensor) const; private: struct ExeTensor { diff --git a/frameworks/native/hdi_device.cpp b/frameworks/native/hdi_device_v1_0.cpp similarity index 70% rename from frameworks/native/hdi_device.cpp rename to frameworks/native/hdi_device_v1_0.cpp index b360ea73145b6c41fcb3324d0b3812a5ef155e4c..3808364f1100b3935d50f628768ee4a65d5fcd17 100644 --- a/frameworks/native/hdi_device.cpp +++ b/frameworks/native/hdi_device_v1_0.cpp @@ -13,12 +13,12 @@ * limitations under the License. */ -#include "hdi_device.h" +#include "hdi_device_v1_0.h" #include "hdf_base.h" #include "mindir.h" -#include "hdi_prepared_model.h" +#include "hdi_prepared_model_v1_0.h" #include "memory_manager.h" #include "transform.h" #include "common/log.h" @@ -26,12 +26,72 @@ namespace OHOS { namespace NeuralNetworkRuntime { -HDIDevice::HDIDevice(OHOS::sptr device) : m_iDevice(device) +namespace { +OH_NN_DeviceType TransHDIDeviceV1_0Type(const V1_0::DeviceType& iDeviceType) +{ + switch (iDeviceType) { + case V1_0::DeviceType::CPU: + return OH_NN_CPU; + case V1_0::DeviceType::GPU: + return OH_NN_GPU; + case V1_0::DeviceType::ACCELERATOR: + return OH_NN_ACCELERATOR; + default: + return OH_NN_OTHERS; + } +} + +DeviceStatus TransHDIDeviceV1_0Status(const V1_0::DeviceStatus& iDeviceStatus) +{ + switch (iDeviceStatus) { + case V1_0::DeviceStatus::AVAILABLE: + return DeviceStatus::AVAILABLE; + case V1_0::DeviceStatus::BUSY: + return DeviceStatus::BUSY; + case V1_0::DeviceStatus::OFFLINE: + return DeviceStatus::OFFLINE; + default: + return DeviceStatus::UNKNOWN; + } +} + +V1_0::PerformanceMode TransPerformanceMode(const OH_NN_PerformanceMode& mode) +{ + switch (mode) { + case OH_NN_PERFORMANCE_LOW: + return V1_0::PerformanceMode::PERFORMANCE_LOW; + case OH_NN_PERFORMANCE_MEDIUM: + return V1_0::PerformanceMode::PERFORMANCE_MEDIUM; + case OH_NN_PERFORMANCE_HIGH: + return V1_0::PerformanceMode::PERFORMANCE_HIGH; + case OH_NN_PERFORMANCE_EXTREME: + 
return V1_0::PerformanceMode::PERFORMANCE_EXTREME; + default: + return V1_0::PerformanceMode::PERFORMANCE_NONE; + } +} + +V1_0::Priority TransPriority(const OH_NN_Priority& priority) +{ + switch (priority) { + case OH_NN_PRIORITY_LOW: + return V1_0::Priority::PRIORITY_LOW; + case OH_NN_PRIORITY_MEDIUM: + return V1_0::Priority::PRIORITY_MEDIUM; + case OH_NN_PRIORITY_HIGH: + return V1_0::Priority::PRIORITY_HIGH; + default: + return V1_0::Priority::PRIORITY_NONE; + } +} +} + +HDIDeviceV1_0::HDIDeviceV1_0(OHOS::sptr device) : m_iDevice(device) { device->GetVersion(m_hdiVersion.first, m_hdiVersion.second); } -OH_NN_ReturnCode HDIDevice::GetDeviceName(std::string& name) +OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceName(std::string& name) { auto ret = m_iDevice->GetDeviceName(name); if (ret != HDF_SUCCESS) { @@ -41,7 +101,7 @@ OH_NN_ReturnCode HDIDevice::GetDeviceName(std::string& name) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetVendorName(std::string& name) +OH_NN_ReturnCode HDIDeviceV1_0::GetVendorName(std::string& name) { auto ret = m_iDevice->GetVendorName(name); if (ret != HDF_SUCCESS) { @@ -51,7 +111,13 @@ OH_NN_ReturnCode HDIDevice::GetVendorName(std::string& name) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetDeviceType(OH_NN_DeviceType& deviceType) +OH_NN_ReturnCode HDIDeviceV1_0::GetVersion(std::string& version) +{ + version = 'v' + std::to_string(m_hdiVersion.first) + '_' + std::to_string(m_hdiVersion.second); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceType(OH_NN_DeviceType& deviceType) { V1_0::DeviceType iDeviceType; auto ret = m_iDevice->GetDeviceType(iDeviceType); @@ -60,11 +126,11 @@ OH_NN_ReturnCode HDIDevice::GetDeviceType(OH_NN_DeviceType& deviceType) return OH_NN_UNAVALIDABLE_DEVICE; } - deviceType = HDIToNN::TransHDIDeviceType(iDeviceType); + deviceType = TransHDIDeviceV1_0Type(iDeviceType); return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetDeviceStatus(DeviceStatus& status) +OH_NN_ReturnCode 
HDIDeviceV1_0::GetDeviceStatus(DeviceStatus& status) { V1_0::DeviceStatus iDeviceStatus; auto ret = m_iDevice->GetDeviceStatus(iDeviceStatus); @@ -72,11 +138,11 @@ OH_NN_ReturnCode HDIDevice::GetDeviceStatus(DeviceStatus& status) LOGE("Get HDI device status failed. ErrorCode=%d", ret); return OH_NN_UNAVALIDABLE_DEVICE; } - status = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); + status = TransHDIDeviceV1_0Status(iDeviceStatus); return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr model, +OH_NN_ReturnCode HDIDeviceV1_0::GetSupportedOperation(std::shared_ptr model, std::vector& ops) { if (model == nullptr) { @@ -84,7 +150,7 @@ OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr 0) { @@ -117,7 +183,7 @@ OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptrIsFloat16PrecisionSupported(isSupported); if (ret != HDF_SUCCESS) { @@ -127,7 +193,7 @@ OH_NN_ReturnCode HDIDevice::IsFloat16PrecisionSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsPerformanceModeSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsPerformanceModeSupported(bool& isSupported) { auto ret = m_iDevice->IsPerformanceModeSupported(isSupported); if (ret != HDF_SUCCESS) { @@ -137,7 +203,7 @@ OH_NN_ReturnCode HDIDevice::IsPerformanceModeSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsPrioritySupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsPrioritySupported(bool& isSupported) { auto ret = m_iDevice->IsPrioritySupported(isSupported); if (ret != HDF_SUCCESS) { @@ -147,7 +213,7 @@ OH_NN_ReturnCode HDIDevice::IsPrioritySupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsDynamicInputSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsDynamicInputSupported(bool& isSupported) { auto ret = m_iDevice->IsDynamicInputSupported(isSupported); if (ret != HDF_SUCCESS) { @@ -157,7 +223,7 @@ OH_NN_ReturnCode 
HDIDevice::IsDynamicInputSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsModelCacheSupported(bool& isSupported) { auto ret = m_iDevice->IsModelCacheSupported(isSupported); if (ret != HDF_SUCCESS) { @@ -167,7 +233,7 @@ OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr model, +OH_NN_ReturnCode HDIDeviceV1_0::PrepareModel(std::shared_ptr model, const ModelConfig& config, std::shared_ptr& preparedModel) { @@ -176,7 +242,7 @@ OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr 0) { @@ -196,8 +262,8 @@ OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr iPreparedModel; auto preparedRet = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel); @@ -213,7 +279,7 @@ OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr(iPreparedModel); + preparedModel = CreateSharedPtr(iPreparedModel); if (preparedModel == nullptr) { LOGE("Prepare model failed, because fail to create preparedModel instance."); return OH_NN_MEMORY_ERROR; @@ -222,7 +288,7 @@ OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr& modelCache, +OH_NN_ReturnCode HDIDeviceV1_0::PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, std::shared_ptr& preparedModel) { @@ -242,8 +308,8 @@ OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector iPreparedModel; auto hdiRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel); @@ -252,7 +318,7 @@ OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector(iPreparedModel); + preparedModel = CreateSharedPtr(iPreparedModel); if (preparedModel == nullptr) { LOGE("Prepare model from model cache failed, because fail to create preparedModel instance."); return OH_NN_MEMORY_ERROR; @@ -260,7 +326,7 @@ OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const 
std::vector +#include +#include #include "device.h" namespace OHOS { namespace NeuralNetworkRuntime { -class HDIDevice : public Device { + +namespace V1_0 = OHOS::HDI::Nnrt::V1_0; + +class HDIDeviceV1_0 : public Device { public: - explicit HDIDevice(OHOS::sptr device); + explicit HDIDeviceV1_0(OHOS::sptr device); OH_NN_ReturnCode GetDeviceName(std::string& name) override; OH_NN_ReturnCode GetVendorName(std::string& name) override; + OH_NN_ReturnCode GetVersion(std::string& version) override; OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override; OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override; OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, @@ -60,4 +66,4 @@ private: }; } // namespace NeuralNetworkRuntime } // namespace OHOS -#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_H \ No newline at end of file +#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V1_0_H \ No newline at end of file diff --git a/frameworks/native/hdi_device_v2_0.cpp b/frameworks/native/hdi_device_v2_0.cpp new file mode 100644 index 0000000000000000000000000000000000000000..264382c3be37f25be35defa5b9e5fcf918bbb6e1 --- /dev/null +++ b/frameworks/native/hdi_device_v2_0.cpp @@ -0,0 +1,397 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "hdi_device_v2_0.h" + +#include "hdf_base.h" +#include "mindir.h" + +#include "hdi_prepared_model_v2_0.h" +#include "memory_manager.h" +#include "transform.h" +#include "common/log.h" +#include "common/utils.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace { +OH_NN_DeviceType TransHDIDeviceV2_0Type(const V2_0::DeviceType& iDeviceType) +{ + switch (iDeviceType) { + case V2_0::DeviceType::CPU: + return OH_NN_CPU; + case V2_0::DeviceType::GPU: + return OH_NN_GPU; + case V2_0::DeviceType::ACCELERATOR: + return OH_NN_ACCELERATOR; + default: + return OH_NN_OTHERS; + } +} + +DeviceStatus TransHDIDeviceV2_0Status(const V2_0::DeviceStatus& iDeviceStatus) +{ + switch (iDeviceStatus) { + case V2_0::DeviceStatus::AVAILABLE: + return DeviceStatus::AVAILABLE; + case V2_0::DeviceStatus::BUSY: + return DeviceStatus::BUSY; + case V2_0::DeviceStatus::OFFLINE: + return DeviceStatus::OFFLINE; + default: + return DeviceStatus::UNKNOWN; + } +} + +V2_0::PerformanceMode TransPerformanceMode(const OH_NN_PerformanceMode& mode) +{ + switch (mode) { + case OH_NN_PERFORMANCE_LOW: + return V2_0::PerformanceMode::PERFORMANCE_LOW; + case OH_NN_PERFORMANCE_MEDIUM: + return V2_0::PerformanceMode::PERFORMANCE_MEDIUM; + case OH_NN_PERFORMANCE_HIGH: + return V2_0::PerformanceMode::PERFORMANCE_HIGH; + case OH_NN_PERFORMANCE_EXTREME: + return V2_0::PerformanceMode::PERFORMANCE_EXTREME; + default: + return V2_0::PerformanceMode::PERFORMANCE_NONE; + } +} + +V2_0::Priority TransPriority(const OH_NN_Priority& priority) +{ + switch (priority) { + case OH_NN_PRIORITY_LOW: + return V2_0::Priority::PRIORITY_LOW; + case OH_NN_PRIORITY_MEDIUM: + return V2_0::Priority::PRIORITY_MEDIUM; + case OH_NN_PRIORITY_HIGH: + return V2_0::Priority::PRIORITY_HIGH; + default: + return V2_0::Priority::PRIORITY_NONE; + } +} +} + +HDIDeviceV2_0::HDIDeviceV2_0(OHOS::sptr device) : m_iDevice(device) +{ + device->GetVersion(m_hdiVersion.first, m_hdiVersion.second); +} + +OH_NN_ReturnCode 
HDIDeviceV2_0::GetDeviceName(std::string& name) +{ + auto ret = m_iDevice->GetDeviceName(name); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device name failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetVendorName(std::string& name) +{ + auto ret = m_iDevice->GetVendorName(name); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device vendor name failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetVersion(std::string& version) +{ + version = 'v' + std::to_string(m_hdiVersion.first) + '_' + std::to_string(m_hdiVersion.second); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceType(OH_NN_DeviceType& deviceType) +{ + V2_0::DeviceType iDeviceType; + auto ret = m_iDevice->GetDeviceType(iDeviceType); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device type failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + deviceType = TransHDIDeviceV2_0Type(iDeviceType); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceStatus(DeviceStatus& status) +{ + V2_0::DeviceStatus iDeviceStatus; + auto ret = m_iDevice->GetDeviceStatus(iDeviceStatus); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device status failed. 
ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + status = TransHDIDeviceV2_0Status(iDeviceStatus); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptr model, + std::vector& ops) +{ + if (model == nullptr) { + LOGE("Model is nullptr, cannot query supported operation."); + return OH_NN_NULL_PTR; + } + + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0}; + size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get()); + int32_t hdiRet {0}; + if (tensorSize > 0) { + hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { + LOGE("Allocate tensor buffer error when get supported operation. ErrorCode: %d", hdiRet); + return OH_NN_FAILED; + } + } + + auto iModel = mindspore::lite::MindIR_LiteGraph_To_Model_V2_0(model.get(), tensorBuffer); + if (iModel == nullptr) { + LOGE("Parse litegraph to hdi model failed."); + ReleaseSharedBuffer(tensorBuffer); + return OH_NN_FAILED; + } + + hdiRet = m_iDevice->GetSupportedOperation(*iModel, ops); + + mindspore::lite::MindIR_Model_Destroy(&iModel); + auto ret = ReleaseSharedBuffer(tensorBuffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Release tensorBuffer failed."); + return OH_NN_FAILED; + } + if (hdiRet != HDF_SUCCESS) { + LOGE("Get supported operation failed. ErrorCode=%d", hdiRet); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsFloat16PrecisionSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsFloat16PrecisionSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query fp16 precision supported failed. 
ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsPerformanceModeSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsPerformanceModeSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query performance mode supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsPrioritySupported(bool& isSupported) +{ + auto ret = m_iDevice->IsPrioritySupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query priority supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsDynamicInputSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsDynamicInputSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query dynamic input supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsModelCacheSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsModelCacheSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query cache model supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + if (model == nullptr) { + LOGE("Model is nullptr, cannot prepare model."); + return OH_NN_INVALID_PARAMETER; + } + + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0}; + size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get()); + int32_t hdiRet {0}; + if (tensorSize > 0) { + hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { + LOGE("Allocate tensor buffer error when prepare model. 
ErrorCode: %d", hdiRet); + return OH_NN_FAILED; + } + } + + V2_0::Model* iModel = mindspore::lite::MindIR_LiteGraph_To_Model_V2_0(model.get(), tensorBuffer); + if (iModel == nullptr) { + LOGE("Parse litegraph to hdi model failed."); + ReleaseSharedBuffer(tensorBuffer); + return OH_NN_FAILED; + } + + V2_0::ModelConfig iModelConfig; + iModelConfig.enableFloat16 = config.enableFloat16; + iModelConfig.mode = TransPerformanceMode(config.mode); + iModelConfig.priority = TransPriority(config.priority); + OHOS::sptr iPreparedModel; + + auto preparedRet = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel); + + mindspore::lite::MindIR_Model_Destroy(&iModel); + auto ret = ReleaseSharedBuffer(tensorBuffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Release tensorBuffer failed."); + return OH_NN_FAILED; + } + if (preparedRet != HDF_SUCCESS || iPreparedModel == nullptr) { + LOGE("Prepare model failed. ErrorCode=%d", preparedRet); + return OH_NN_FAILED; + } + + preparedModel = CreateSharedPtr(iPreparedModel); + if (preparedModel == nullptr) { + LOGE("Prepare model failed, because fail to create preparedModel instance."); + return OH_NN_MEMORY_ERROR; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + std::vector iBuffers; + auto memManager = MemoryManager::GetInstance(); + Memory memory; + OH_NN_ReturnCode ret; + size_t modelCacheSize = modelCache.size(); + for (size_t i = 0; i < modelCacheSize; i++) { + ret = memManager->GetMemory(modelCache[i].buffer, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("The %zuth model cache is invalid. 
Please put valid model cache.", i + 1); + return ret; + } + iBuffers.emplace_back(V2_0::SharedBuffer {memory.fd, memory.length, 0, memory.length}); + } + + V2_0::ModelConfig iModelConfig; + iModelConfig.enableFloat16 = config.enableFloat16; + iModelConfig.mode = TransPerformanceMode(config.mode); + iModelConfig.priority = TransPriority(config.priority); + + OHOS::sptr iPreparedModel; + auto hdiRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel); + if (hdiRet != HDF_SUCCESS) { + LOGE("Prepare model from cache failed. ErrorCode=%d", hdiRet); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + preparedModel = CreateSharedPtr(iPreparedModel); + if (preparedModel == nullptr) { + LOGE("Prepare model from model cache failed, because fail to create preparedModel instance."); + return OH_NN_MEMORY_ERROR; + } + return OH_NN_SUCCESS; +} + +void* HDIDeviceV2_0::AllocateBuffer(size_t length) +{ + if (length == 0) { + LOGE("The length param is invalid, length=0"); + return nullptr; + } + + V2_0::SharedBuffer buffer; + auto ret = m_iDevice->AllocateBuffer(length, buffer); + if (ret != HDF_SUCCESS) { + LOGE("Allocate buffer error. 
ErrorCode: %d", ret); + return nullptr; + } + + auto memManager = MemoryManager::GetInstance(); + auto addr = memManager->MapMemory(buffer.fd, length); + if (addr == nullptr) { + LOGE("Map fd to address failed."); + } + return addr; +} + +OH_NN_ReturnCode HDIDeviceV2_0::ReleaseBuffer(const void* buffer) +{ + if (buffer == nullptr) { + LOGE("Buffer is nullptr, no need to release."); + return OH_NN_INVALID_PARAMETER; + } + + auto memManager = MemoryManager::GetInstance(); + Memory memory; + auto ret = memManager->GetMemory(buffer, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("Invalid Buffer, it is not NNRt buffer."); + return ret; + } + + V2_0::SharedBuffer hdiBuffer {memory.fd, memory.length, 0, memory.length}; + auto deviceResult = m_iDevice->ReleaseBuffer(hdiBuffer); + if (deviceResult != HDF_SUCCESS) { + LOGE("Device release buffer error. ErrorCode: %d", deviceResult); + return OH_NN_FAILED; + } + + ret = memManager->UnMapMemory(buffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Unmap memory failed."); + return ret; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::ReleaseSharedBuffer(const V2_0::SharedBuffer& buffer) +{ + if (buffer.fd == INVALID_FD) { + LOGI("No need to release. fd=%d", INVALID_FD); + return OH_NN_SUCCESS; + } + + auto ret = m_iDevice->ReleaseBuffer(buffer); + if (ret != HDF_SUCCESS) { + LOGE("Device release buffer error. ErrorCode=%d", ret); + return OH_NN_FAILED; + } + return OH_NN_SUCCESS; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/hdi_device_v2_0.h b/frameworks/native/hdi_device_v2_0.h new file mode 100644 index 0000000000000000000000000000000000000000..fee7831df4d556e3a96f254ef5cfbe4054884198 --- /dev/null +++ b/frameworks/native/hdi_device_v2_0.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V2_0_H +#define NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V2_0_H + +#include "refbase.h" +#include +#include +#include + +#include "device.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { + +namespace V2_0 = OHOS::HDI::Nnrt::V2_0; + +class HDIDeviceV2_0 : public Device { +public: + explicit HDIDeviceV2_0(OHOS::sptr device); + + OH_NN_ReturnCode GetDeviceName(std::string& name) override; + OH_NN_ReturnCode GetVendorName(std::string& name) override; + OH_NN_ReturnCode GetVersion(std::string& version) override; + OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override; + OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override; + OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, + std::vector& ops) override; + + OH_NN_ReturnCode IsFloat16PrecisionSupported(bool& isSupported) override; + OH_NN_ReturnCode IsPerformanceModeSupported(bool& isSupported) override; + OH_NN_ReturnCode IsPrioritySupported(bool& isSupported) override; + OH_NN_ReturnCode IsDynamicInputSupported(bool& isSupported) override; + OH_NN_ReturnCode IsModelCacheSupported(bool& isSupported) override; + + OH_NN_ReturnCode PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) override; + OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, + std::shared_ptr& preparedModel) override; + + void* AllocateBuffer(size_t length) override; + OH_NN_ReturnCode ReleaseBuffer(const void* buffer) override; + +private: + 
OH_NN_ReturnCode ReleaseSharedBuffer(const V2_0::SharedBuffer& buffer); + +private: + // first: major version, second: minor version + std::pair m_hdiVersion; + OHOS::sptr m_iDevice {nullptr}; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V2_0_H \ No newline at end of file diff --git a/frameworks/native/hdi_prepared_model.cpp b/frameworks/native/hdi_prepared_model_v1_0.cpp similarity index 51% rename from frameworks/native/hdi_prepared_model.cpp rename to frameworks/native/hdi_prepared_model_v1_0.cpp index 491aec696489b34b44c32bad5cdd5a8c93d7c969..898d37990ad63e39538b9206b0f10283cccb6680 100644 --- a/frameworks/native/hdi_prepared_model.cpp +++ b/frameworks/native/hdi_prepared_model_v1_0.cpp @@ -13,21 +13,93 @@ * limitations under the License. */ -#include "hdi_prepared_model.h" +#include "hdi_prepared_model_v1_0.h" #include "common/log.h" #include "memory_manager.h" -#include "transform.h" namespace OHOS { namespace NeuralNetworkRuntime { -HDIPreparedModel::HDIPreparedModel(OHOS::sptr hdiPreparedModel) +namespace { +V1_0::DataType TransDataType(const OH_NN_DataType& dataType) +{ + switch (dataType) { + case OH_NN_BOOL: + return V1_0::DataType::DATA_TYPE_BOOL; + case OH_NN_INT8: + return V1_0::DataType::DATA_TYPE_INT8; + case OH_NN_INT16: + return V1_0::DataType::DATA_TYPE_INT16; + case OH_NN_INT32: + return V1_0::DataType::DATA_TYPE_INT32; + case OH_NN_INT64: + return V1_0::DataType::DATA_TYPE_INT64; + case OH_NN_UINT8: + return V1_0::DataType::DATA_TYPE_UINT8; + case OH_NN_UINT16: + return V1_0::DataType::DATA_TYPE_UINT16; + case OH_NN_UINT32: + return V1_0::DataType::DATA_TYPE_UINT32; + case OH_NN_UINT64: + return V1_0::DataType::DATA_TYPE_UINT64; + case OH_NN_FLOAT16: + return V1_0::DataType::DATA_TYPE_FLOAT16; + case OH_NN_FLOAT32: + return V1_0::DataType::DATA_TYPE_FLOAT32; + case OH_NN_FLOAT64: + return V1_0::DataType::DATA_TYPE_FLOAT64; + default: + return V1_0::DataType::DATA_TYPE_UNKNOWN; + } +} 
+ +V1_0::Format TransFormat(const OH_NN_Format& format) +{ + switch (format) { + case OH_NN_FORMAT_NCHW: + return V1_0::Format::FORMAT_NCHW; + case OH_NN_FORMAT_NHWC: + return V1_0::Format::FORMAT_NHWC; + default: + return V1_0::Format::FORMAT_NONE; + } +} + +V1_0::IOTensor TransIOTensor(const IOTensor& tensor) +{ + V1_0::IOTensor iTensor; + iTensor.name = tensor.name; + iTensor.dataType = TransDataType(tensor.dataType); + iTensor.dimensions = tensor.dimensions; + iTensor.format = TransFormat(tensor.format); + + V1_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0}; + if (tensor.data != nullptr) { + auto memManager = MemoryManager::GetInstance(); + Memory memory; + auto ret = memManager->GetMemory(tensor.data, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("Invalid Tensor buffer, cannot transform to fd."); + } else { + iBuffer.fd = memory.fd; + iBuffer.bufferSize = memory.length; + iBuffer.offset = 0; + iBuffer.dataSize = memory.length; + } + } + iTensor.data = iBuffer; + + return iTensor; +} +} // unamed namespace + +HDIPreparedModelV1_0::HDIPreparedModelV1_0(OHOS::sptr hdiPreparedModel) : m_hdiPreparedModel(hdiPreparedModel) { hdiPreparedModel->GetVersion(m_hdiVersion.first, m_hdiVersion.second); } -OH_NN_ReturnCode HDIPreparedModel::ExportModelCache(std::vector& modelCache) +OH_NN_ReturnCode HDIPreparedModelV1_0::ExportModelCache(std::vector& modelCache) { if (!modelCache.empty()) { LOGE("The vector of modelCache should be empty. 
size=%zu", modelCache.size()); @@ -55,13 +127,13 @@ OH_NN_ReturnCode HDIPreparedModel::ExportModelCache(std::vector& mo return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIPreparedModel::Run(const std::vector& inputs, const std::vector& outputs, +OH_NN_ReturnCode HDIPreparedModelV1_0::Run(const std::vector& inputs, const std::vector& outputs, std::vector>& outputsDims, std::vector& isOutputBufferEnough) { V1_0::IOTensor iTensor; std::vector iInputTensors; for (auto& input: inputs) { - iTensor = NNToHDI::TransIOTensor(input); + iTensor = TransIOTensor(input); if (iTensor.data.fd == INVALID_FD) { LOGE("Transform inputs tensor failed, cannot find data file descriptor."); return OH_NN_INVALID_PARAMETER; @@ -71,7 +143,7 @@ OH_NN_ReturnCode HDIPreparedModel::Run(const std::vector& inputs, cons std::vector iOutputTensors; for (auto& output: outputs) { - iTensor = NNToHDI::TransIOTensor(output); + iTensor = TransIOTensor(output); if (iTensor.data.fd == INVALID_FD) { LOGE("Transform outputs tensor failed, cannot find data file descriptor."); return OH_NN_INVALID_PARAMETER; diff --git a/frameworks/native/hdi_prepared_model.h b/frameworks/native/hdi_prepared_model_v1_0.h similarity index 75% rename from frameworks/native/hdi_prepared_model.h rename to frameworks/native/hdi_prepared_model_v1_0.h index d111977b329e3377a95c0de03ae825b1121410cf..f5a8911095571fd46c4d14091181b85753ac45e5 100644 --- a/frameworks/native/hdi_prepared_model.h +++ b/frameworks/native/hdi_prepared_model_v1_0.h @@ -14,21 +14,25 @@ */ -#ifndef NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H -#define NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H +#ifndef NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V1_0_H +#define NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V1_0_H #include #include "refbase.h" -#include "hdi_interfaces.h" #include "prepared_model.h" #include "cpp_type.h" +#include +#include +#include + +namespace V1_0 = OHOS::HDI::Nnrt::V1_0; namespace OHOS { namespace NeuralNetworkRuntime { -class HDIPreparedModel : public 
PreparedModel { +class HDIPreparedModelV1_0 : public PreparedModel { public: - explicit HDIPreparedModel(OHOS::sptr hdiPreparedModel); + explicit HDIPreparedModelV1_0(OHOS::sptr hdiPreparedModel); OH_NN_ReturnCode ExportModelCache(std::vector& modelCache) override; @@ -44,4 +48,4 @@ private: }; } // namespace NeuralNetworkRuntime } // OHOS -#endif // NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H \ No newline at end of file +#endif // NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V1_0_H \ No newline at end of file diff --git a/frameworks/native/hdi_prepared_model_v2_0.cpp b/frameworks/native/hdi_prepared_model_v2_0.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d9c6bc2796b58d726beb1b4d96254aea765c73c8 --- /dev/null +++ b/frameworks/native/hdi_prepared_model_v2_0.cpp @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "hdi_prepared_model_v2_0.h" + +#include "common/log.h" +#include "memory_manager.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace { +V2_0::DataType TransDataType(const OH_NN_DataType& dataType) +{ + switch (dataType) { + case OH_NN_BOOL: + return V2_0::DataType::DATA_TYPE_BOOL; + case OH_NN_INT8: + return V2_0::DataType::DATA_TYPE_INT8; + case OH_NN_INT16: + return V2_0::DataType::DATA_TYPE_INT16; + case OH_NN_INT32: + return V2_0::DataType::DATA_TYPE_INT32; + case OH_NN_INT64: + return V2_0::DataType::DATA_TYPE_INT64; + case OH_NN_UINT8: + return V2_0::DataType::DATA_TYPE_UINT8; + case OH_NN_UINT16: + return V2_0::DataType::DATA_TYPE_UINT16; + case OH_NN_UINT32: + return V2_0::DataType::DATA_TYPE_UINT32; + case OH_NN_UINT64: + return V2_0::DataType::DATA_TYPE_UINT64; + case OH_NN_FLOAT16: + return V2_0::DataType::DATA_TYPE_FLOAT16; + case OH_NN_FLOAT32: + return V2_0::DataType::DATA_TYPE_FLOAT32; + case OH_NN_FLOAT64: + return V2_0::DataType::DATA_TYPE_FLOAT64; + default: + return V2_0::DataType::DATA_TYPE_UNKNOWN; + } +} + +V2_0::Format TransFormat(const OH_NN_Format& format) +{ + switch (format) { + case OH_NN_FORMAT_NCHW: + return V2_0::Format::FORMAT_NCHW; + case OH_NN_FORMAT_NHWC: + return V2_0::Format::FORMAT_NHWC; + default: + return V2_0::Format::FORMAT_NONE; + } +} + +V2_0::IOTensor TransIOTensor(const IOTensor& tensor) +{ + V2_0::IOTensor iTensor; + iTensor.name = tensor.name; + iTensor.dataType = TransDataType(tensor.dataType); + iTensor.dimensions = tensor.dimensions; + iTensor.format = TransFormat(tensor.format); + + V2_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0}; + if (tensor.data != nullptr) { + auto memManager = MemoryManager::GetInstance(); + Memory memory; + auto ret = memManager->GetMemory(tensor.data, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("Invalid Tensor buffer, cannot transform to fd."); + } else { + iBuffer.fd = memory.fd; + iBuffer.bufferSize = memory.length; + iBuffer.offset = 0; + 
iBuffer.dataSize = memory.length; + } + } + iTensor.data = iBuffer; + + return iTensor; +} +} // unamed namespace + +HDIPreparedModelV2_0::HDIPreparedModelV2_0(OHOS::sptr hdiPreparedModel) + : m_hdiPreparedModel(hdiPreparedModel) +{ + hdiPreparedModel->GetVersion(m_hdiVersion.first, m_hdiVersion.second); +} + +OH_NN_ReturnCode HDIPreparedModelV2_0::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + LOGE("The vector of modelCache should be empty. size=%zu", modelCache.size()); + return OH_NN_INVALID_PARAMETER; + } + + std::vector iBuffers; + auto ret = m_hdiPreparedModel->ExportModelCache(iBuffers); + if (ret != HDF_SUCCESS) { + LOGE("Export model cache failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + auto memManager = MemoryManager::GetInstance(); + for (size_t i = 0; i < iBuffers.size(); i++) { + auto addr = memManager->MapMemory(iBuffers[i].fd, iBuffers[i].bufferSize); + if (addr == nullptr) { + LOGE("Export the %zuth model cache failed, cannot not map fd to address.", i + 1); + return OH_NN_MEMORY_ERROR; + } + ModelBuffer modelbuffer {addr, iBuffers[i].bufferSize}; + modelCache.emplace_back(modelbuffer); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) +{ + V2_0::IOTensor iTensor; + std::vector iInputTensors; + for (auto& input: inputs) { + iTensor = TransIOTensor(input); + if (iTensor.data.fd == INVALID_FD) { + LOGE("Transform inputs tensor failed, cannot find data file descriptor."); + return OH_NN_INVALID_PARAMETER; + } + iInputTensors.emplace_back(iTensor); + } + + std::vector iOutputTensors; + for (auto& output: outputs) { + iTensor = TransIOTensor(output); + if (iTensor.data.fd == INVALID_FD) { + LOGE("Transform outputs tensor failed, cannot find data file descriptor."); + return OH_NN_INVALID_PARAMETER; + } + iOutputTensors.emplace_back(iTensor); + } + + auto 
ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims, isOutputBufferEnough); + if (ret != HDF_SUCCESS || outputsDims.empty()) { + LOGE("Run model failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIPreparedModelV2_0::GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims) +{ + auto ret = m_hdiPreparedModel->GetInputDimRanges(minInputDims, maxInputDims); + if (ret != HDF_SUCCESS) { + LOGE("GetInputDimRanges failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + return OH_NN_SUCCESS; +} +} // namespace NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/hdi_prepared_model_v2_0.h b/frameworks/native/hdi_prepared_model_v2_0.h new file mode 100644 index 0000000000000000000000000000000000000000..ad42dcbcb314c56727b8f641132fcddc23e2bb64 --- /dev/null +++ b/frameworks/native/hdi_prepared_model_v2_0.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +#ifndef NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V2_0_H +#define NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V2_0_H + +#include + +#include +#include +#include + +#include "refbase.h" +#include "prepared_model.h" +#include "cpp_type.h" + +namespace V2_0 = OHOS::HDI::Nnrt::V2_0; + +namespace OHOS { +namespace NeuralNetworkRuntime { +class HDIPreparedModelV2_0 : public PreparedModel { +public: + explicit HDIPreparedModelV2_0(OHOS::sptr hdiPreparedModel); + + OH_NN_ReturnCode ExportModelCache(std::vector& modelCache) override; + + OH_NN_ReturnCode Run(const std::vector& inputs, + const std::vector& outputs, + std::vector>& outputsDims, + std::vector& isOutputBufferEnough) override; + + OH_NN_ReturnCode GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims) override; + +private: + // first: major version, second: minor version + std::pair m_hdiVersion; + OHOS::sptr m_hdiPreparedModel {nullptr}; +}; +} // namespace NeuralNetworkRuntime +} // OHOS +#endif // NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V2_0_H \ No newline at end of file diff --git a/frameworks/native/inner_model.cpp b/frameworks/native/inner_model.cpp index bcd20c6e2cf1feafc2e058ea790dce815363b03e..205222fa258f1c5535e9d09477c46bb6ec0fc143 100644 --- a/frameworks/native/inner_model.cpp +++ b/frameworks/native/inner_model.cpp @@ -24,7 +24,6 @@ #include "common/utils.h" #include "common/scoped_trace.h" #include "device_manager.h" -#include "hdi_device.h" #include "validation.h" #include "ops_builder.h" #include "ops_registry.h" diff --git a/frameworks/native/ops/cast_builder.cpp b/frameworks/native/ops/cast_builder.cpp index 81dc1eb2b5f386bbf04fa022649b8dba146b4438..6336926209671c077fe40afd083ba5cfaab5f097 100644 --- a/frameworks/native/ops/cast_builder.cpp +++ b/frameworks/native/ops/cast_builder.cpp @@ -57,7 +57,6 @@ OH_NN_ReturnCode CastBuilder::Build(const std::vector& paramsIndex, LOGE("[Cast] Type of cast operator is not validation."); return OH_NN_INVALID_PARAMETER; } - 
*castTypeInt = (OH_NN_DataType)NNToHDI::TransDataType(*castTypeInt); if (!paramsIndex.empty()) { LOGE("[Cast] Cast expects no parameters"); diff --git a/frameworks/native/prepared_model.h b/frameworks/native/prepared_model.h index 65741311a999d456487091e11fc54e2f2c4641b1..2d25f6fbf0e33e0cdf6a43cab392db3676f40b29 100644 --- a/frameworks/native/prepared_model.h +++ b/frameworks/native/prepared_model.h @@ -34,6 +34,10 @@ public: const std::vector& outputs, std::vector>& outputsDims, std::vector& isOutputBufferEnough) = 0; + + virtual OH_NN_ReturnCode GetInputDimRanges( + std::vector>& minInputDims, + std::vector>& maxInputDims) { return OH_NN_OPERATION_FORBIDDEN; } }; } // OHOS } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/transform.cpp b/frameworks/native/transform.cpp index ea0d3391a84757016eb600c0e782642a768cd627..d3705d5381f93b45c79d0def51b688b8608adf5f 100644 --- a/frameworks/native/transform.cpp +++ b/frameworks/native/transform.cpp @@ -25,134 +25,6 @@ const uint32_t BIT16_TO_BYTE = 2; const uint32_t BIT32_TO_BYTE = 4; const uint32_t BIT64_TO_BYTE = 8; -OH_NN_DeviceType HDIToNN::TransHDIDeviceType(const V1_0::DeviceType& iDeviceType) -{ - switch (iDeviceType) { - case V1_0::DeviceType::CPU: - return OH_NN_CPU; - case V1_0::DeviceType::GPU: - return OH_NN_GPU; - case V1_0::DeviceType::ACCELERATOR: - return OH_NN_ACCELERATOR; - default: - return OH_NN_OTHERS; - } -} - -DeviceStatus HDIToNN::TransHDIDeviceStatus(const V1_0::DeviceStatus& iDeviceStatus) -{ - switch (iDeviceStatus) { - case V1_0::DeviceStatus::AVAILABLE: - return DeviceStatus::AVAILABLE; - case V1_0::DeviceStatus::BUSY: - return DeviceStatus::BUSY; - case V1_0::DeviceStatus::OFFLINE: - return DeviceStatus::OFFLINE; - default: - return DeviceStatus::UNKNOWN; - } -} - -V1_0::PerformanceMode NNToHDI::TransPerformanceMode(const OH_NN_PerformanceMode& mode) -{ - switch (mode) { - case OH_NN_PERFORMANCE_LOW: - return V1_0::PerformanceMode::PERFORMANCE_LOW; - case 
OH_NN_PERFORMANCE_MEDIUM: - return V1_0::PerformanceMode::PERFORMANCE_MEDIUM; - case OH_NN_PERFORMANCE_HIGH: - return V1_0::PerformanceMode::PERFORMANCE_HIGH; - case OH_NN_PERFORMANCE_EXTREME: - return V1_0::PerformanceMode::PERFORMANCE_EXTREME; - default: - return V1_0::PerformanceMode::PERFORMANCE_NONE; - } -} -V1_0::Priority NNToHDI::TransPriority(const OH_NN_Priority& priority) -{ - switch (priority) { - case OH_NN_PRIORITY_LOW: - return V1_0::Priority::PRIORITY_LOW; - case OH_NN_PRIORITY_MEDIUM: - return V1_0::Priority::PRIORITY_MEDIUM; - case OH_NN_PRIORITY_HIGH: - return V1_0::Priority::PRIORITY_HIGH; - default: - return V1_0::Priority::PRIORITY_NONE; - } -} - -V1_0::DataType NNToHDI::TransDataType(const OH_NN_DataType& dataType) -{ - switch (dataType) { - case OH_NN_BOOL: - return V1_0::DataType::DATA_TYPE_BOOL; - case OH_NN_INT8: - return V1_0::DataType::DATA_TYPE_INT8; - case OH_NN_INT16: - return V1_0::DataType::DATA_TYPE_INT16; - case OH_NN_INT32: - return V1_0::DataType::DATA_TYPE_INT32; - case OH_NN_INT64: - return V1_0::DataType::DATA_TYPE_INT64; - case OH_NN_UINT8: - return V1_0::DataType::DATA_TYPE_UINT8; - case OH_NN_UINT16: - return V1_0::DataType::DATA_TYPE_UINT16; - case OH_NN_UINT32: - return V1_0::DataType::DATA_TYPE_UINT32; - case OH_NN_UINT64: - return V1_0::DataType::DATA_TYPE_UINT64; - case OH_NN_FLOAT16: - return V1_0::DataType::DATA_TYPE_FLOAT16; - case OH_NN_FLOAT32: - return V1_0::DataType::DATA_TYPE_FLOAT32; - case OH_NN_FLOAT64: - return V1_0::DataType::DATA_TYPE_FLOAT64; - default: - return V1_0::DataType::DATA_TYPE_UNKNOWN; - } -} - -V1_0::Format NNToHDI::TransFormat(const OH_NN_Format& format) -{ - switch (format) { - case OH_NN_FORMAT_NCHW: - return V1_0::Format::FORMAT_NCHW; - case OH_NN_FORMAT_NHWC: - return V1_0::Format::FORMAT_NHWC; - default: - return V1_0::Format::FORMAT_NONE; - } -} - -V1_0::IOTensor NNToHDI::TransIOTensor(const IOTensor& tensor) -{ - V1_0::IOTensor iTensor; - iTensor.name = tensor.name; - 
iTensor.dataType = TransDataType(tensor.dataType); - iTensor.dimensions = tensor.dimensions; - iTensor.format = TransFormat(tensor.format); - - V1_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0}; - if (tensor.data != nullptr) { - auto memManager = MemoryManager::GetInstance(); - Memory memory; - auto ret = memManager->GetMemory(tensor.data, memory); - if (ret != OH_NN_SUCCESS) { - LOGE("Invalid Tensor buffer, cannot transform to fd."); - } else { - iBuffer.fd = memory.fd; - iBuffer.bufferSize = memory.length; - iBuffer.offset = 0; - iBuffer.dataSize = memory.length; - } - } - iTensor.data = iBuffer; - - return iTensor; -} - uint32_t GetTypeSize(OH_NN_DataType type) { switch (type) { diff --git a/frameworks/native/transform.h b/frameworks/native/transform.h index 2472ad3f8d1ca8e77912ac3e4f521d7ce7825c1b..24d54e8d351dc7cf9a05e70eceea8cc365412daa 100644 --- a/frameworks/native/transform.h +++ b/frameworks/native/transform.h @@ -16,7 +16,6 @@ #ifndef NEURAL_NETWORK_RUNTIME_TRANSFORM_H #define NEURAL_NETWORK_RUNTIME_TRANSFORM_H -#include "hdi_interfaces.h" #include "interfaces/kits/c/neural_network_runtime_type.h" #include "cpp_type.h" #include "mindir.h" @@ -38,19 +37,6 @@ std::vector ConstructVectorFromArray(const T* data, size_t size) uint32_t GetTypeSize(OH_NN_DataType type); -namespace HDIToNN { -OH_NN_DeviceType TransHDIDeviceType(const V1_0::DeviceType& iDeviceType); -DeviceStatus TransHDIDeviceStatus(const V1_0::DeviceStatus& iDeviceStatus); -} // namespace HDIToNN - -namespace NNToHDI { -V1_0::PerformanceMode TransPerformanceMode(const OH_NN_PerformanceMode& mode); -V1_0::Priority TransPriority(const OH_NN_Priority& priority); -V1_0::DataType TransDataType(const OH_NN_DataType& dataType); -V1_0::Format TransFormat(const OH_NN_Format& format); -V1_0::IOTensor TransIOTensor(const IOTensor& tensor); -} // namespace NNToHDI - namespace NNToMS { mindspore::lite::DataType TransformDataType(OH_NN_DataType type); mindspore::lite::Format TransformFormat(OH_NN_Format 
type); diff --git a/test/unittest/common/compilation_mock_idevice.cpp b/test/unittest/common/v1_0/compilation_mock_idevice.cpp similarity index 79% rename from test/unittest/common/compilation_mock_idevice.cpp rename to test/unittest/common/v1_0/compilation_mock_idevice.cpp index 52f647d0af64ec0f253ad5121d18158b5bb111ce..6dc4fb595de74c689ce175ada88836e72baafc42 100644 --- a/test/unittest/common/compilation_mock_idevice.cpp +++ b/test/unittest/common/v1_0/compilation_mock_idevice.cpp @@ -15,9 +15,9 @@ #include "common/utils.h" #include "frameworks/native/device_manager.h" -#include "frameworks/native/hdi_device.h" +#include "frameworks/native/hdi_device_v1_0.h" #include "frameworks/native/nn_tensor.h" -#include "test/unittest/common/mock_idevice.h" +#include "test/unittest/common/v1_0/mock_idevice.h" OH_NN_ReturnCode OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; @@ -32,7 +32,7 @@ std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const return nullptr; } - std::shared_ptr device = CreateSharedPtr(idevice); + std::shared_ptr device = CreateSharedPtr(idevice); if (device == nullptr) { LOGE("DeviceManager mock GetDevice failed, the device is nullptr"); return nullptr; @@ -46,7 +46,7 @@ std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const } } -OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsModelCacheSupported(bool& isSupported) { // isSupported is false when expecting to return success if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { @@ -66,7 +66,7 @@ OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr model, +OH_NN_ReturnCode HDIDeviceV1_0::GetSupportedOperation(std::shared_ptr model, std::vector& ops) { if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_FILE) { @@ -76,7 +76,7 @@ OH_NN_ReturnCode 
HDIDevice::GetSupportedOperation(std::shared_ptr model, +OH_NN_ReturnCode HDIDeviceV1_0::PrepareModel(std::shared_ptr model, const ModelConfig& config, std::shared_ptr& preparedModel) { if (model == nullptr) { - LOGE("HDIDevice mock PrepareModel failed, the model is nullptr"); + LOGE("HDIDeviceV1_0 mock PrepareModel failed, the model is nullptr"); return OH_NN_INVALID_PARAMETER; } if (config.enableFloat16 == false) { - LOGE("HDIDevice mock PrepareModel failed, the enableFloat16 is false"); + LOGE("HDIDeviceV1_0 mock PrepareModel failed, the enableFloat16 is false"); return OH_NN_FAILED; } sptr hdiPreparedModel = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIPreparedModel()); if (hdiPreparedModel == nullptr) { - LOGE("HDIDevice mock PrepareModel failed, error happened when new sptr"); + LOGE("HDIDeviceV1_0 mock PrepareModel failed, error happened when new sptr"); return OH_NN_NULL_PTR; } - preparedModel = CreateSharedPtr(hdiPreparedModel); + preparedModel = CreateSharedPtr(hdiPreparedModel); return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIPreparedModel::ExportModelCache(std::vector& modelCache) +OH_NN_ReturnCode HDIPreparedModelV1_0::ExportModelCache(std::vector& modelCache) { if (!modelCache.empty()) { - LOGE("HDIPreparedModel mock ExportModelCache failed, the modelCache is not empty"); + LOGE("HDIPreparedModelV1_0 mock ExportModelCache failed, the modelCache is not empty"); return OH_NN_INVALID_PARAMETER; } @@ -215,10 +215,10 @@ OH_NN_ReturnCode HDIPreparedModel::ExportModelCache(std::vector& mo return OH_NN_SUCCESS; } -void* HDIDevice::AllocateBuffer(size_t length) +void* HDIDeviceV1_0::AllocateBuffer(size_t length) { if (length == 0) { - LOGE("HDIDevice mock AllocateBuffer failed, the length param is invalid"); + LOGE("HDIDeviceV1_0 mock AllocateBuffer failed, the length param is invalid"); return nullptr; } @@ -229,16 +229,16 @@ void* HDIDevice::AllocateBuffer(size_t length) void* buffer = (void*)malloc(length); if (buffer == nullptr) { - LOGE("HDIDevice 
mock AllocateBuffer failed, the buffer is nullptr"); + LOGE("HDIDeviceV1_0 mock AllocateBuffer failed, the buffer is nullptr"); return nullptr; } return buffer; } -OH_NN_ReturnCode HDIDevice::ReleaseBuffer(const void* buffer) +OH_NN_ReturnCode HDIDeviceV1_0::ReleaseBuffer(const void* buffer) { if (buffer == nullptr) { - LOGE("HDIDevice mock ReleaseBuffer failed, the buffer is nullptr"); + LOGE("HDIDeviceV1_0 mock ReleaseBuffer failed, the buffer is nullptr"); return OH_NN_NULL_PTR; } @@ -247,7 +247,7 @@ OH_NN_ReturnCode HDIDevice::ReleaseBuffer(const void* buffer) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector& modelCache, +OH_NN_ReturnCode HDIDeviceV1_0::PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, std::shared_ptr& preparedModel) { @@ -257,18 +257,18 @@ OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector hdiPreparedModel = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIPreparedModel()); if (hdiPreparedModel == nullptr) { - LOGE("HDIDevice mock PrepareModelFromModelCache failed, error happened when new sptr"); + LOGE("HDIDeviceV1_0 mock PrepareModelFromModelCache failed, error happened when new sptr"); return OH_NN_NULL_PTR; } - preparedModel = CreateSharedPtr(hdiPreparedModel); + preparedModel = CreateSharedPtr(hdiPreparedModel); return OH_NN_SUCCESS; } diff --git a/test/unittest/common/executor_mock_device.cpp b/test/unittest/common/v1_0/executor_mock_device.cpp similarity index 84% rename from test/unittest/common/executor_mock_device.cpp rename to test/unittest/common/v1_0/executor_mock_device.cpp index 47934e0e3c2ccee30cc6e0643e558adef38e9bce..51b795086f5b319b812ef16336e1bf6ab96909fd 100644 --- a/test/unittest/common/executor_mock_device.cpp +++ b/test/unittest/common/v1_0/executor_mock_device.cpp @@ -15,8 +15,8 @@ #include "frameworks/native/compilation.h" #include "frameworks/native/execution_plan.h" -#include "frameworks/native/hdi_device.h" 
-#include "test/unittest/common/mock_idevice.h" +#include "frameworks/native/hdi_device_v1_0.h" +#include "test/unittest/common/v1_0/mock_idevice.h" OH_NN_ReturnCode OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; @@ -26,7 +26,7 @@ std::shared_ptr ExecutionPlan::GetInputDevice() const { sptr idevice = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice()); - std::shared_ptr device = std::make_shared(idevice); + std::shared_ptr device = std::make_shared(idevice); return device; } @@ -34,11 +34,11 @@ std::shared_ptr ExecutionPlan::GetOutputDevice() const { sptr idevice = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice()); - std::shared_ptr device = std::make_shared(idevice); + std::shared_ptr device = std::make_shared(idevice); return device; } -void* HDIDevice::AllocateBuffer(size_t length) +void* HDIDeviceV1_0::AllocateBuffer(size_t length) { if (length == 0) { LOGE("The length param is invalid, length=0"); @@ -58,7 +58,7 @@ void* HDIDevice::AllocateBuffer(size_t length) return buffer; } -OH_NN_ReturnCode HDIDevice::ReleaseBuffer(const void* buffer) +OH_NN_ReturnCode HDIDeviceV1_0::ReleaseBuffer(const void* buffer) { if (buffer == nullptr) { LOGE("alloct buffer failed"); @@ -69,7 +69,7 @@ OH_NN_ReturnCode HDIDevice::ReleaseBuffer(const void* buffer) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIPreparedModel::Run(const std::vector& inputs, const std::vector& outputs, +OH_NN_ReturnCode HDIPreparedModelV1_0::Run(const std::vector& inputs, const std::vector& outputs, std::vector>& outputsDims, std::vector& isOutputBufferEnough) { if (inputs.empty() || outputs.empty()) { @@ -92,10 +92,10 @@ std::shared_ptr Compilation::GetExecutionPlan() const sptr hdiPreparedModel = OHOS::sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::HDI::Nnrt::V1_0::MockIPreparedModel()); - std::shared_ptr preparedModel = std::make_shared(hdiPreparedModel); + std::shared_ptr preparedModel = std::make_shared(hdiPreparedModel); sptr idevice = 
OHOS::sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice()); - std::shared_ptr device = std::make_shared(idevice); + std::shared_ptr device = std::make_shared(idevice); ExecutionPlan executor(preparedModel, device); std::shared_ptr pExcutor = std::make_shared(executor); return pExcutor; diff --git a/test/unittest/common/inner_model_mock_device.cpp b/test/unittest/common/v1_0/inner_model_mock_device.cpp similarity index 92% rename from test/unittest/common/inner_model_mock_device.cpp rename to test/unittest/common/v1_0/inner_model_mock_device.cpp index 386ee5ba60e5ad17f44d8168ff3badb88c051d22..d9be0bd16419f8618e69444a7abab1b59e1f71ba 100644 --- a/test/unittest/common/inner_model_mock_device.cpp +++ b/test/unittest/common/v1_0/inner_model_mock_device.cpp @@ -18,7 +18,7 @@ #include "common/utils.h" #include "frameworks/native/inner_model.h" -#include "frameworks/native/hdi_device.h" +#include "frameworks/native/hdi_device_v1_0.h" #include "frameworks/native/device_manager.h" #include "frameworks/native/ops/div_builder.h" #include "mock_idevice.h" @@ -35,7 +35,7 @@ std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const LOGE("DeviceManager mock GetDevice failed, error happened when new sptr"); return nullptr; } else { - std::shared_ptr device = CreateSharedPtr(idevice); + std::shared_ptr device = CreateSharedPtr(idevice); if (device == nullptr) { LOGE("DeviceManager mock GetDevice failed, device is nullptr"); return nullptr; @@ -57,7 +57,7 @@ Ops::LiteGraphPrimitvePtr Ops::DivBuilder::GetPrimitive() } // Mock the palce where the device GetSupportedOperation is called in inner_model build function. 
-OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr model, +OH_NN_ReturnCode HDIDeviceV1_0::GetSupportedOperation(std::shared_ptr model, std::vector& supportedOperations) { supportedOperations = {true, true, true}; diff --git a/test/unittest/common/mock_idevice.cpp b/test/unittest/common/v1_0/mock_idevice.cpp similarity index 100% rename from test/unittest/common/mock_idevice.cpp rename to test/unittest/common/v1_0/mock_idevice.cpp diff --git a/test/unittest/common/mock_idevice.h b/test/unittest/common/v1_0/mock_idevice.h similarity index 97% rename from test/unittest/common/mock_idevice.h rename to test/unittest/common/v1_0/mock_idevice.h index 64e8231c331b3bdfac76a9e1df8a391036518056..2d871d6bdc81774f5f01804c9044395e0f639b4e 100644 --- a/test/unittest/common/mock_idevice.h +++ b/test/unittest/common/v1_0/mock_idevice.h @@ -18,7 +18,7 @@ #include -#include "frameworks/native/hdi_prepared_model.h" +#include "frameworks/native/hdi_prepared_model_v1_0.h" #include "frameworks/native/memory_manager.h" #include "frameworks/native/transform.h" diff --git a/test/unittest/common/v2_0/compilation_mock_idevice.cpp b/test/unittest/common/v2_0/compilation_mock_idevice.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d9f5290c12c0c0116ce3e004a03d168e32c23d16 --- /dev/null +++ b/test/unittest/common/v2_0/compilation_mock_idevice.cpp @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common/utils.h" +#include "frameworks/native/device_manager.h" +#include "frameworks/native/hdi_device_v2_0.h" +#include "frameworks/native/nn_tensor.h" +#include "test/unittest/common/v2_0/mock_idevice.h" + +OH_NN_ReturnCode OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + +namespace OHOS { +namespace NeuralNetworkRuntime { +std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const +{ + sptr idevice + = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::MockIDevice()); + if (idevice == nullptr) { + LOGE("DeviceManager mock GetDevice failed, error happened when new sptr"); + return nullptr; + } + + std::shared_ptr device = CreateSharedPtr(idevice); + if (device == nullptr) { + LOGE("DeviceManager mock GetDevice failed, the device is nullptr"); + return nullptr; + } + + if (deviceId == 0) { + LOGE("DeviceManager mock GetDevice failed, the passed parameter deviceId is 0"); + return nullptr; + } else { + return device; + } +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsModelCacheSupported(bool& isSupported) +{ + // isSupported is false when expecting to return success + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + // In order not to affect other use cases, set to the OH_NN_OPERATION_FORBIDDEN + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_FAILED; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptr model, + std::vector& ops) +{ + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_FILE) { + 
HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + ops.emplace_back(true); + return OH_NN_SUCCESS; + } + + if (model == nullptr) { + LOGE("HDIDeviceV2_0 mock GetSupportedOperation failed, Model is nullptr, cannot query supported operation."); + return OH_NN_NULL_PTR; + } + + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + ops.emplace_back(false); + return OH_NN_SUCCESS; + } + + ops.emplace_back(true); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsDynamicInputSupported(bool& isSupported) +{ + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_FAILED; + } + + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_PATH) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsPerformanceModeSupported(bool& isSupported) +{ + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_FAILED; + } + + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsPrioritySupported(bool& isSupported) +{ + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_PARAMETER) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return 
OH_NN_INVALID_PARAMETER; + } + + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsFloat16PrecisionSupported(bool& isSupported) +{ + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_MEMORY_ERROR) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_MEMORY_ERROR; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + if (model == nullptr) { + LOGE("HDIDeviceV2_0 mock PrepareModel failed, the model is nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + if (config.enableFloat16 == false) { + LOGE("HDIDeviceV2_0 mock PrepareModel failed, the enableFloat16 is false"); + return OH_NN_FAILED; + } + + sptr hdiPreparedModel = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::MockIPreparedModel()); + if (hdiPreparedModel == nullptr) { + LOGE("HDIDeviceV2_0 mock PrepareModel failed, error happened when new sptr"); + return OH_NN_NULL_PTR; + } + + preparedModel = CreateSharedPtr(hdiPreparedModel); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIPreparedModelV2_0::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + LOGE("HDIPreparedModelV2_0 mock ExportModelCache failed, the modelCache is not empty"); + return OH_NN_INVALID_PARAMETER; + } + + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = 
OH_NN_OPERATION_FORBIDDEN; + return OH_NN_FAILED; + } + + int bufferSize = 13; + ModelBuffer modelBuffer; + std::string aBuffer = "mock_buffer_a"; + modelBuffer.buffer = (void*)aBuffer.c_str(); + modelBuffer.length = bufferSize; + modelCache.emplace_back(modelBuffer); + + ModelBuffer modelBuffer2; + std::string bBuffer = "mock_buffer_b"; + modelBuffer2.buffer = (void*)bBuffer.c_str(); + modelBuffer2.length = bufferSize; + modelCache.emplace_back(modelBuffer2); + + return OH_NN_SUCCESS; +} + +void* HDIDeviceV2_0::AllocateBuffer(size_t length) +{ + if (length == 0) { + LOGE("HDIDeviceV2_0 mock AllocateBuffer failed, the length param is invalid"); + return nullptr; + } + + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_NULL_PTR) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return nullptr; + } + + void* buffer = (void*)malloc(length); + if (buffer == nullptr) { + LOGE("HDIDeviceV2_0 mock AllocateBuffer failed, the buffer is nullptr"); + return nullptr; + } + return buffer; +} + +OH_NN_ReturnCode HDIDeviceV2_0::ReleaseBuffer(const void* buffer) +{ + if (buffer == nullptr) { + LOGE("HDIDeviceV2_0 mock ReleaseBuffer failed, the buffer is nullptr"); + return OH_NN_NULL_PTR; + } + + free(const_cast(buffer)); + buffer = nullptr; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return OH_NN_FAILED; + } + + if (modelCache.size() == 0 || config.enableFloat16 == false) { + LOGE("HDIDeviceV2_0 mock PrepareModel failed, the modelCache size equals 0 or enableFloat16 is false"); + return OH_NN_FAILED; + } + + sptr hdiPreparedModel = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::MockIPreparedModel()); + if (hdiPreparedModel == 
nullptr) { + LOGE("HDIDeviceV2_0 mock PrepareModelFromModelCache failed, error happened when new sptr"); + return OH_NN_NULL_PTR; + } + + preparedModel = CreateSharedPtr(hdiPreparedModel); + + return OH_NN_SUCCESS; +} + +bool NNTensor::IsDynamicShape() const +{ + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return false; + } + + return true; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/common/v2_0/executor_mock_device.cpp b/test/unittest/common/v2_0/executor_mock_device.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b5b2049c4273bc8374b928b9eb53a681f5d4abda --- /dev/null +++ b/test/unittest/common/v2_0/executor_mock_device.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/compilation.h" +#include "frameworks/native/execution_plan.h" +#include "frameworks/native/hdi_device_v2_0.h" +#include "test/unittest/common/v2_0/mock_idevice.h" + +OH_NN_ReturnCode OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + +namespace OHOS { +namespace NeuralNetworkRuntime { +std::shared_ptr ExecutionPlan::GetInputDevice() const +{ + sptr idevice + = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::MockIDevice()); + std::shared_ptr device = std::make_shared(idevice); + return device; +} + +std::shared_ptr ExecutionPlan::GetOutputDevice() const +{ + sptr idevice + = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::MockIDevice()); + std::shared_ptr device = std::make_shared(idevice); + return device; +} + +void* HDIDeviceV2_0::AllocateBuffer(size_t length) +{ + if (length == 0) { + LOGE("The length param is invalid, length=0"); + return nullptr; + } + + void* buffer = (void*)malloc(length); + if (buffer == nullptr) { + LOGE("alloct buffer failed"); + return nullptr; + } + + if (OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_PARAMETER) { + OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return nullptr; + } + return buffer; +} + +OH_NN_ReturnCode HDIDeviceV2_0::ReleaseBuffer(const void* buffer) +{ + if (buffer == nullptr) { + LOGE("alloct buffer failed"); + return OH_NN_FAILED; + } + free(const_cast(buffer)); + buffer = nullptr; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) +{ + if (inputs.empty() || outputs.empty()) { + return OH_NN_INVALID_PARAMETER; + } + + if (OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return OH_NN_INVALID_PARAMETER; + } + + 
isOutputBufferEnough.emplace_back(true); + outputsDims.emplace_back(outputs[0].dimensions); + + return OH_NN_SUCCESS; +} + +std::shared_ptr Compilation::GetExecutionPlan() const +{ + sptr hdiPreparedModel = OHOS::sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::HDI::Nnrt::V2_0::MockIPreparedModel()); + + std::shared_ptr preparedModel = std::make_shared(hdiPreparedModel); + sptr idevice + = OHOS::sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::MockIDevice()); + std::shared_ptr device = std::make_shared(idevice); + ExecutionPlan executor(preparedModel, device); + std::shared_ptr pExcutor = std::make_shared(executor); + return pExcutor; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/common/v2_0/inner_model_mock_device.cpp b/test/unittest/common/v2_0/inner_model_mock_device.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cffa3ccf82b94e36005218720209bd7eef3f9928 --- /dev/null +++ b/test/unittest/common/v2_0/inner_model_mock_device.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include "common/utils.h" +#include "frameworks/native/inner_model.h" +#include "frameworks/native/hdi_device_v2_0.h" +#include "frameworks/native/device_manager.h" +#include "frameworks/native/ops/div_builder.h" +#include "mock_idevice.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +// Mock the palce where the devicemanager GetDevice is called in inner_model build function. +std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const +{ + sptr idevice = + sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::MockIDevice()); + + if (idevice == nullptr) { + LOGE("DeviceManager mock GetDevice failed, error happened when new sptr"); + return nullptr; + } else { + std::shared_ptr device = CreateSharedPtr(idevice); + if (device == nullptr) { + LOGE("DeviceManager mock GetDevice failed, device is nullptr"); + return nullptr; + } + + if (deviceId == 0) { + return nullptr; + } else { + return device; + } + } +} + +// Mock the palce where the operator GetPrimitive is called in inner_model build function. +Ops::LiteGraphPrimitvePtr Ops::DivBuilder::GetPrimitive() +{ + Ops::LiteGraphPrimitvePtr primitive = {nullptr, DestroyLiteGraphTensor}; + return primitive; +} + +// Mock the palce where the device GetSupportedOperation is called in inner_model build function. +OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptr model, + std::vector& supportedOperations) +{ + supportedOperations = {true, true, true}; + + if (model->name_ == "Loaded_NNR_Model") { + return OH_NN_UNAVALIDABLE_DEVICE; + } else { + return OH_NN_SUCCESS; + } +} +} // NeuralNetworkRuntime +} // OHOS diff --git a/test/unittest/common/v2_0/mock_idevice.cpp b/test/unittest/common/v2_0/mock_idevice.cpp new file mode 100644 index 0000000000000000000000000000000000000000..17561c43c5176cbc70c7b9ccedb196d44abaaa4c --- /dev/null +++ b/test/unittest/common/v2_0/mock_idevice.cpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mock_idevice.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +sptr INnrtDevice::Get(bool isStub) +{ + return INnrtDevice::Get("device_service", isStub); +} + +sptr INnrtDevice::Get(const std::string& serviceName, bool isStub) +{ + if (isStub) { + return nullptr; + } + + sptr mockIDevice = sptr(new (std::nothrow) MockIDevice()); + if (mockIDevice == nullptr) { + return nullptr; + } + std::string deviceName = "MockDevice"; + EXPECT_CALL(*((V2_0::MockIDevice*)mockIDevice.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_SUCCESS))); + + std::string vendorName = "MockVendor"; + EXPECT_CALL(*((V2_0::MockIDevice*)mockIDevice.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_SUCCESS))); + + V2_0::DeviceStatus deviceStatus = V2_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*((V2_0::MockIDevice*)mockIDevice.GetRefPtr()), GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceStatus), ::testing::Return(HDF_SUCCESS))); + + return mockIDevice; +} +} // V2_0 +} // Nnrt +} // HDI +} // OHOS \ No newline at end of file diff --git a/test/unittest/common/v2_0/mock_idevice.h b/test/unittest/common/v2_0/mock_idevice.h new file mode 100644 index 
0000000000000000000000000000000000000000..7ad846e79d757727df911f6ea1a67e5278eaebe4 --- /dev/null +++ b/test/unittest/common/v2_0/mock_idevice.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_MOCK_IDEVICE_H +#define NEURAL_NETWORK_RUNTIME_MOCK_IDEVICE_H + +#include + +#include "frameworks/native/hdi_prepared_model_v2_0.h" +#include "frameworks/native/memory_manager.h" +#include "frameworks/native/transform.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +class MockIDevice : public INnrtDevice { +public: + MOCK_METHOD1(GetDeviceName, int32_t(std::string&)); + MOCK_METHOD1(GetVendorName, int32_t(std::string&)); + MOCK_METHOD1(GetDeviceType, int32_t(DeviceType&)); + MOCK_METHOD1(GetDeviceStatus, int32_t(DeviceStatus&)); + MOCK_METHOD2(GetSupportedOperation, int32_t(const Model&, std::vector&)); + MOCK_METHOD1(IsFloat16PrecisionSupported, int32_t(bool&)); + MOCK_METHOD1(IsPerformanceModeSupported, int32_t(bool&)); + MOCK_METHOD1(IsPrioritySupported, int32_t(bool&)); + MOCK_METHOD1(IsDynamicInputSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModel, int32_t(const Model&, const ModelConfig&, OHOS::sptr&)); + MOCK_METHOD1(IsModelCacheSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModelFromModelCache, int32_t(const std::vector&, const ModelConfig&, + OHOS::sptr&)); + MOCK_METHOD2(AllocateBuffer, int32_t(uint32_t, SharedBuffer&)); + 
MOCK_METHOD1(ReleaseBuffer, int32_t(const SharedBuffer&)); + MOCK_METHOD2(GetVersion, int32_t(uint32_t&, uint32_t&)); +}; + +class MockIPreparedModel : public IPreparedModel { +public: + MOCK_METHOD1(ExportModelCache, int32_t(std::vector&)); + MOCK_METHOD4(Run, int32_t(const std::vector&, const std::vector&, + std::vector>&, std::vector&)); + MOCK_METHOD2(GetInputDimRanges, int32_t(std::vector>&, std::vector>&)); + MOCK_METHOD2(GetVersion, int32_t(uint32_t&, uint32_t&)); + + static OH_NN_ReturnCode m_ExpectRetCode; +}; +} // V2_0 +} // Nnrt +} // HDI +} // OHOS +#endif // NEURAL_NETWORK_RUNTIME_MOCK_IDEVICE_H diff --git a/test/unittest/components/BUILD.gn b/test/unittest/components/BUILD.gn index 5b37d98d22fea6db43f4401271b675a6614105b1..b628cfaee428ba3ac228f081d1926b411b512e72 100644 --- a/test/unittest/components/BUILD.gn +++ b/test/unittest/components/BUILD.gn @@ -26,11 +26,32 @@ config("module_private_config") { ] } -ohos_unittest("CompilationTest") { +ohos_unittest("MemoryManagerTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/memory_manager/memory_manager_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/file_utils.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("CompilationV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/compilation/compilation_test.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/compilation_mock_idevice.cpp" ] + sources = [ 
"//foundation/ai/neural_network_runtime/test/unittest/components/v1_0/compilation/compilation_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/v1_0/compilation_mock_idevice.cpp" ] configs = [ ":module_private_config" ] deps = [ @@ -49,11 +70,11 @@ ohos_unittest("CompilationTest") { ] } -ohos_unittest("ExecutorTest") { +ohos_unittest("ExecutorV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/executor/executor_test.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/executor_mock_device.cpp" ] + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v1_0/executor/executor_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/v1_0/executor_mock_device.cpp" ] configs = [ ":module_private_config" ] deps = [ @@ -72,11 +93,11 @@ ohos_unittest("ExecutorTest") { ] } -ohos_unittest("DeviceManagerTest") { +ohos_unittest("DeviceManagerV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/device_manager/device_manager_test.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/mock_idevice.cpp" ] + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v1_0/device_manager/device_manager_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/v1_0/mock_idevice.cpp" ] configs = [ ":module_private_config" ] deps = [ @@ -94,11 +115,11 @@ ohos_unittest("DeviceManagerTest") { ] } -ohos_unittest("DeviceRegistrarTest") { +ohos_unittest("DeviceRegistrarV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/device_registrar/device_registrar_test.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/mock_idevice.cpp" ] + sources = [ 
"//foundation/ai/neural_network_runtime/test/unittest/components/v1_0/device_registrar/device_registrar_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/v1_0/mock_idevice.cpp" ] configs = [ ":module_private_config" ] deps = [ @@ -116,11 +137,11 @@ ohos_unittest("DeviceRegistrarTest") { ] } -ohos_unittest("HDIDeviceTest") { +ohos_unittest("HDIDeviceV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/hdi_device/hdi_device_test.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/mock_idevice.cpp" ] + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v1_0/hdi_device/hdi_device_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/v1_0/mock_idevice.cpp" ] sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/file_utils.cpp" ] configs = [ ":module_private_config" ] @@ -139,11 +160,11 @@ ohos_unittest("HDIDeviceTest") { ] } -ohos_unittest("HDIPreparedModelTest") { +ohos_unittest("HDIPreparedModelV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/hdi_prepared_model/hdi_prepared_model_test.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/mock_idevice.cpp" ] + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v1_0/hdi_prepared_model/hdi_prepared_model_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/v1_0/mock_idevice.cpp" ] sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/file_utils.cpp" ] configs = [ ":module_private_config" ] @@ -162,11 +183,10 @@ ohos_unittest("HDIPreparedModelTest") { ] } -ohos_unittest("MemoryManagerTest") { +ohos_unittest("TransformV1_0Test") { module_out_path = module_output_path - sources = [ 
"//foundation/ai/neural_network_runtime/test/unittest/components/memory_manager/memory_manager_test.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/file_utils.cpp" ] + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v1_0/transform/transform_test.cpp" ] configs = [ ":module_private_config" ] deps = [ @@ -183,10 +203,11 @@ ohos_unittest("MemoryManagerTest") { ] } -ohos_unittest("TransformTest") { +ohos_unittest("InnerModelV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/transform/transform_test.cpp" ] + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v1_0/inner_model/inner_model_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/v1_0/inner_model_mock_device.cpp" ] configs = [ ":module_private_config" ] deps = [ @@ -196,18 +217,19 @@ ohos_unittest("TransformTest") { ] external_deps = [ + "c_utils:utils", "drivers_interface_nnrt:libnnrt_proxy_1.0", + "hdf_core:libhdf_utils", "hitrace_native:libhitracechain", "hiviewdfx_hilog_native:libhilog", "mindspore:mindir", ] } -ohos_unittest("InnerModelTest") { +ohos_unittest("NnTensorV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/inner_model/inner_model_test.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/inner_model_mock_device.cpp" ] + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v1_0/inner_model/nn_tensor_test.cpp" ] configs = [ ":module_private_config" ] deps = [ @@ -226,10 +248,10 @@ ohos_unittest("InnerModelTest") { ] } -ohos_unittest("NnTensorTest") { +ohos_unittest("NnValidationV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/inner_model/nn_tensor_test.cpp" ] + sources = [ 
"//foundation/ai/neural_network_runtime/test/unittest/components/v1_0/inner_model/nn_validation_test.cpp" ] configs = [ ":module_private_config" ] deps = [ @@ -248,10 +270,10 @@ ohos_unittest("NnTensorTest") { ] } -ohos_unittest("NnValidationTest") { +ohos_unittest("OpsRegistryV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/inner_model/nn_validation_test.cpp" ] + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v1_0/inner_model/ops_regitstry_test.cpp" ] configs = [ ":module_private_config" ] deps = [ @@ -270,10 +292,12 @@ ohos_unittest("NnValidationTest") { ] } -ohos_unittest("OpsRegistryTest") { +ohos_unittest("NeuralNetworkRuntimeV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/inner_model/ops_regitstry_test.cpp" ] + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/v1_0/executor_mock_device.cpp" ] + configs = [ ":module_private_config" ] deps = [ @@ -292,12 +316,34 @@ ohos_unittest("OpsRegistryTest") { ] } -ohos_unittest("NeuralNetworkRuntimeTest") { +ohos_unittest("CompilationV2_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/executor_mock_device.cpp" ] + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v2_0/compilation/compilation_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/v2_0/compilation_mock_idevice.cpp" ] + configs = [ ":module_private_config" ] + deps = [ + 
"//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("ExecutorV2_0Test") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v2_0/executor/executor_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/v2_0/executor_mock_device.cpp" ] configs = [ ":module_private_config" ] deps = [ @@ -308,7 +354,230 @@ ohos_unittest("NeuralNetworkRuntimeTest") { external_deps = [ "c_utils:utils", - "drivers_interface_nnrt:libnnrt_proxy_1.0", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("DeviceManagerV2_0Test") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v2_0/device_manager/device_manager_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/v2_0/mock_idevice.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("DeviceRegistrarV2_0Test") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v2_0/device_registrar/device_registrar_test.cpp" ] + 
sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/v2_0/mock_idevice.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("HDIDeviceV2_0Test") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v2_0/hdi_device/hdi_device_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/v2_0/mock_idevice.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/file_utils.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("HDIPreparedModelV2_0Test") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v2_0/hdi_prepared_model/hdi_prepared_model_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/v2_0/mock_idevice.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/file_utils.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + 
"drivers_interface_nnrt:libnnrt_proxy_2.0", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("TransformV2_0Test") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v2_0/transform/transform_test.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("InnerModelV2_0Test") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v2_0/inner_model/inner_model_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/v2_0/inner_model_mock_device.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("NnTensorV2_0Test") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v2_0/inner_model/nn_tensor_test.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + 
"hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("NnValidationV2_0Test") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v2_0/inner_model/nn_validation_test.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("OpsRegistryV2_0Test") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v2_0/inner_model/ops_regitstry_test.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("NeuralNetworkRuntimeV2_0Test") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/v2_0/neural_network_runtime_test/neural_network_runtime_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/v2_0/executor_mock_device.cpp" ] + + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + 
external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", "hdf_core:libhdf_utils", "hitrace_native:libhitracechain", "hiviewdfx_hilog_native:libhilog", @@ -319,18 +588,32 @@ ohos_unittest("NeuralNetworkRuntimeTest") { group("components_unittest") { testonly = true deps = [ - ":CompilationTest", - ":DeviceManagerTest", - ":DeviceRegistrarTest", - ":ExecutorTest", - ":HDIDeviceTest", - ":HDIPreparedModelTest", - ":InnerModelTest", ":MemoryManagerTest", - ":NeuralNetworkRuntimeTest", - ":NnTensorTest", - ":NnValidationTest", - ":OpsRegistryTest", - ":TransformTest", + + ":CompilationV1_0Test", + ":DeviceManagerV1_0Test", + ":DeviceRegistrarV1_0Test", + ":ExecutorV1_0Test", + ":HDIDeviceV1_0Test", + ":HDIPreparedModelV1_0Test", + ":InnerModelV1_0Test", + ":NeuralNetworkRuntimeV1_0Test", + ":NnTensorV1_0Test", + ":NnValidationV1_0Test", + ":OpsRegistryV1_0Test", + ":TransformV1_0Test", + + ":CompilationV2_0Test", + ":DeviceManagerV2_0Test", + ":DeviceRegistrarV2_0Test", + ":ExecutorV2_0Test", + ":HDIDeviceV2_0Test", + ":HDIPreparedModelV2_0Test", + ":InnerModelV2_0Test", + ":NeuralNetworkRuntimeV2_0Test", + ":NnTensorV2_0Test", + ":NnValidationV2_0Test", + ":OpsRegistryV2_0Test", + ":TransformV2_0Test", ] } diff --git a/test/unittest/components/compilation/compilation_test.cpp b/test/unittest/components/v1_0/compilation/compilation_test.cpp similarity index 99% rename from test/unittest/components/compilation/compilation_test.cpp rename to test/unittest/components/v1_0/compilation/compilation_test.cpp index 8529ccb2ed4fd0eadf897a26c0aa7c77e73dad14..25cc81c3eb8bb0943cc3bd52694102a577d2627d 100644 --- a/test/unittest/components/compilation/compilation_test.cpp +++ b/test/unittest/components/v1_0/compilation/compilation_test.cpp @@ -19,7 +19,7 @@ #include "mindir.h" -#include "test/unittest/common/mock_idevice.h" +#include "test/unittest/common/v1_0/mock_idevice.h" using namespace OHOS::NeuralNetworkRuntime; using namespace OHOS::HDI::Nnrt::V1_0; 
diff --git a/test/unittest/components/compilation/compilation_test.h b/test/unittest/components/v1_0/compilation/compilation_test.h similarity index 100% rename from test/unittest/components/compilation/compilation_test.h rename to test/unittest/components/v1_0/compilation/compilation_test.h diff --git a/test/unittest/components/device_manager/device_manager_test.cpp b/test/unittest/components/v1_0/device_manager/device_manager_test.cpp similarity index 90% rename from test/unittest/components/device_manager/device_manager_test.cpp rename to test/unittest/components/v1_0/device_manager/device_manager_test.cpp index 3e5068962be3e663510bb60607c2656db7769921..af7a33f1c22f4e98b32251e8ecf98f6e6bf5b189 100644 --- a/test/unittest/components/device_manager/device_manager_test.cpp +++ b/test/unittest/components/v1_0/device_manager/device_manager_test.cpp @@ -18,8 +18,8 @@ #include "common/log.h" #include "frameworks/native/device_manager.h" -#include "frameworks/native/hdi_device.h" -#include "test/unittest/common/mock_idevice.h" +#include "frameworks/native/hdi_device_v1_0.h" +#include "test/unittest/common/v1_0/mock_idevice.h" using namespace testing; using namespace testing::ext; @@ -128,8 +128,8 @@ HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_001, TestSize.Level0) std::string vendorName = "MockVendor"; MockInit(device, typeVect, deviceName, vendorName); - std::function()> creator = - [&device]()->std::shared_ptr {return std::make_shared(device);}; + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; auto& deviceManager = DeviceManager::GetInstance(); OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); EXPECT_EQ(OH_NN_FAILED, result); @@ -142,8 +142,8 @@ HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_001, TestSize.Level0) */ HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_002, TestSize.Level0) { - std::function()> creator = - []()->std::shared_ptr {return nullptr;}; + std::function()> 
creator = + []()->std::shared_ptr {return nullptr;}; auto& deviceManager = DeviceManager::GetInstance(); OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); @@ -164,8 +164,8 @@ HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_003, TestSize.Level0) std::string vendorName = "MockVendor"; MockInit(device, typeVect, deviceName, vendorName); - std::function()> creator = - [&device]()->std::shared_ptr {return std::make_shared(device);}; + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; auto& deviceManager = DeviceManager::GetInstance(); OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); @@ -186,8 +186,8 @@ HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_004, TestSize.Level0) std::string vendorName = "MockVendor"; MockInit(device, typeVect, deviceName, vendorName); - std::function()> creator = - [&device]()->std::shared_ptr {return std::make_shared(device);}; + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; auto& deviceManager = DeviceManager::GetInstance(); OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); @@ -208,8 +208,8 @@ HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_005, TestSize.Level0) std::string vendorName = "MockVendorA"; MockInit(device, typeVect, deviceName, vendorName); - std::function()> creator = - [&device]()->std::shared_ptr {return std::make_shared(device);}; + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; auto& deviceManager = DeviceManager::GetInstance(); OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); EXPECT_EQ(OH_NN_SUCCESS, result); diff --git a/test/unittest/components/device_registrar/device_registrar_test.cpp b/test/unittest/components/v1_0/device_registrar/device_registrar_test.cpp 
similarity index 96% rename from test/unittest/components/device_registrar/device_registrar_test.cpp rename to test/unittest/components/v1_0/device_registrar/device_registrar_test.cpp index b96079c82c7ae9fe40e6e00f7e30c17aa1ffc647..11b3c8c6022c92ebebfd3a24daf214a5cb524b68 100644 --- a/test/unittest/components/device_registrar/device_registrar_test.cpp +++ b/test/unittest/components/v1_0/device_registrar/device_registrar_test.cpp @@ -14,16 +14,15 @@ */ #include - -#include #include +#include #include #include "common/log.h" #include "frameworks/native/device_registrar.h" -#include "frameworks/native/hdi_device.h" +#include "frameworks/native/hdi_device_v1_0.h" #include "frameworks/native/device_manager.h" -#include "test/unittest/common/mock_idevice.h" +#include "test/unittest/common/v1_0/mock_idevice.h" using namespace testing; using namespace testing::ext; @@ -92,6 +91,11 @@ public: name = "MockVendorA"; return OH_NN_SUCCESS; }; + OH_NN_ReturnCode GetVersion(std::string& version) override + { + version = "MockVersionA"; + return OH_NN_SUCCESS; + } OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override { return OH_NN_SUCCESS; @@ -211,7 +215,7 @@ std::shared_ptr CreateDeviceObjectCallback() { OHOS::sptr device = IRegisterDevice::Get(false); EXPECT_NE(device, nullptr); - std::shared_ptr m_mockDevice = std::make_unique(device); + std::shared_ptr m_mockDevice = std::make_shared(device); return m_mockDevice; } diff --git a/test/unittest/components/executor/executor_test.cpp b/test/unittest/components/v1_0/executor/executor_test.cpp similarity index 99% rename from test/unittest/components/executor/executor_test.cpp rename to test/unittest/components/v1_0/executor/executor_test.cpp index 5d13e51600f36395b41fff685076199b18e03673..5fb6189a7a913e9f864e7cfd9e5b7ff648a60f20 100644 --- a/test/unittest/components/executor/executor_test.cpp +++ b/test/unittest/components/v1_0/executor/executor_test.cpp @@ -18,7 +18,7 @@ #include "common/scoped_trace.h" #include 
"frameworks/native/compilation.h" #include "frameworks/native/inner_model.h" -#include "test/unittest/common/mock_idevice.h" +#include "test/unittest/common/v1_0/mock_idevice.h" using namespace OHOS::NeuralNetworkRuntime; using namespace OHOS::NeuralNetworkRuntime::Ops; diff --git a/test/unittest/components/executor/executor_test.h b/test/unittest/components/v1_0/executor/executor_test.h similarity index 100% rename from test/unittest/components/executor/executor_test.h rename to test/unittest/components/v1_0/executor/executor_test.h diff --git a/test/unittest/components/hdi_device/hdi_device_test.cpp b/test/unittest/components/v1_0/hdi_device/hdi_device_test.cpp similarity index 90% rename from test/unittest/components/hdi_device/hdi_device_test.cpp rename to test/unittest/components/v1_0/hdi_device/hdi_device_test.cpp index 07925bf69aa8d1dd60eed179007006240fe43a05..2c1be5a32a797fe3793447c418e725cc99ca1411 100644 --- a/test/unittest/components/hdi_device/hdi_device_test.cpp +++ b/test/unittest/components/v1_0/hdi_device/hdi_device_test.cpp @@ -24,8 +24,8 @@ #include #include -#include "frameworks/native/hdi_device.h" -#include "test/unittest/common/mock_idevice.h" +#include "frameworks/native/hdi_device_v1_0.h" +#include "test/unittest/common/v1_0/mock_idevice.h" #include "test/unittest/common/file_utils.h" using namespace testing; @@ -95,7 +95,7 @@ OH_NN_ReturnCode HDIDeviceTest::PrepareModel(int32_t allocBufferType, int32_t pr OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); EXPECT_NE(sp, nullptr); - std::unique_ptr hdiDevice = std::make_unique(sp); + std::unique_ptr hdiDevice = std::make_unique(sp); EXPECT_NE(hdiDevice, nullptr); V1_0::SharedBuffer buffer {1, 1, 0, 1}; @@ -124,7 +124,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_constructor_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); EXPECT_NE(device, nullptr); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = 
std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); } @@ -136,7 +136,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_constructor_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getdevicename_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); std::string deviceName = "MockDevice"; EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceName(::testing::_)) @@ -157,7 +157,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getdevicename_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getdevicename_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); std::string deviceName = "MockDevice"; EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceName(::testing::_)) @@ -174,7 +174,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getdevicename_002, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); std::string vendorName = "MockVendor"; EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetVendorName(::testing::_)) @@ -195,7 +195,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); std::string vendorName = "MockVendor"; EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetVendorName(::testing::_)) @@ -212,7 +212,7 @@ 
HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_002, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); V1_0::DeviceType iDeviceType = V1_0::DeviceType::CPU; EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_)) @@ -233,7 +233,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); OH_NN_DeviceType deviceType = OH_NN_CPU; @@ -252,7 +252,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_002, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::AVAILABLE; @@ -274,7 +274,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); DeviceStatus deviceStatus = AVAILABLE; V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::AVAILABLE; @@ -294,7 +294,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_001, TestSize.Level0) std::vector ops {true}; std::shared_ptr model = std::make_shared(); OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr 
hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); V1_0::SharedBuffer buffer {1, 1, 0, 1}; @@ -324,7 +324,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_002, TestSize.Level0) std::vector ops; std::shared_ptr model = std::make_shared(); OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); V1_0::SharedBuffer buffer {1, 1, 0, 1}; @@ -343,7 +343,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_002, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_003, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); std::shared_ptr model = nullptr; @@ -362,7 +362,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_004, TestSize.Level0) std::vector ops {true}; std::shared_ptr model = std::make_shared(); OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); V1_0::SharedBuffer buffer {2, 1, 0, 1}; @@ -385,7 +385,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_004, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_isfloat16precisionsupported_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -403,7 +403,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_isfloat16precisionsupported_001, TestSize.Leve HWTEST_F(HDIDeviceTest, hdidevice_isfloat16precisionsupported_002, TestSize.Level0) { OHOS::sptr device = 
V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -421,7 +421,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_isfloat16precisionsupported_002, TestSize.Leve HWTEST_F(HDIDeviceTest, hdidevice_isperformancemodesupported_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -443,7 +443,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_isperformancemodesupported_001, TestSize.Level HWTEST_F(HDIDeviceTest, hdidevice_isperformancemodesupported_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -461,7 +461,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_isperformancemodesupported_002, TestSize.Level HWTEST_F(HDIDeviceTest, hdidevice_isprioritysupported_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -483,7 +483,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_isprioritysupported_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_isprioritysupported_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -501,7 +501,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_isprioritysupported_002, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_isdynamicinputsupported_001, TestSize.Level0) { OHOS::sptr 
device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -523,7 +523,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_isdynamicinputsupported_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_isdynamicinputsupported_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -541,7 +541,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_isdynamicinputsupported_002, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_ismodelcachesupported_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -563,7 +563,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_ismodelcachesupported_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_ismodelcachesupported_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -594,7 +594,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); std::shared_ptr model = nullptr; @@ -647,7 +647,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_001, TestSize.Level OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); EXPECT_NE(sp, nullptr); - 
std::unique_ptr hdiDevice = std::make_unique(sp); + std::unique_ptr hdiDevice = std::make_unique(sp); EXPECT_NE(hdiDevice, nullptr); std::shared_ptr preparedModel; @@ -677,7 +677,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_002, TestSize.Level OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); EXPECT_NE(sp, nullptr); - std::unique_ptr hdiDevice = std::make_unique(sp); + std::unique_ptr hdiDevice = std::make_unique(sp); EXPECT_NE(hdiDevice, nullptr); std::vector modelCache = { { buffer, 100 } }; @@ -686,7 +686,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_002, TestSize.Level OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(preModel, nullptr); - std::shared_ptr preparedModel = std::make_shared(preModel); + std::shared_ptr preparedModel = std::make_shared(preModel); OHOS::sptr iPreparedModel = OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel); @@ -705,7 +705,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_002, TestSize.Level HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_003, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); std::vector modelCache = { { nullptr, 0 } }; @@ -723,7 +723,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_003, TestSize.Level HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); V1_0::SharedBuffer buffer; @@ -744,7 +744,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice 
= std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); size_t length = 8; @@ -761,7 +761,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_002, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_003, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); size_t length = 0; @@ -781,7 +781,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_001, TestSize.Level0) GetBuffer(buffer, length); OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_)) .WillRepeatedly(::testing::Return(HDF_SUCCESS)); @@ -800,7 +800,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); V1_0::SharedBuffer sharedbuffer; @@ -823,7 +823,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_002, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_003, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); void *buffer = nullptr; @@ -840,7 +840,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_004, TestSize.Level0) const size_t length = 100; auto* buffer = new(std::nothrow) char[length]; OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = 
std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); hdiDevice->ReleaseBuffer(buffer); @@ -860,7 +860,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_005, TestSize.Level0) GetBuffer(buffer, length); OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_)) diff --git a/test/unittest/components/hdi_prepared_model/hdi_prepared_model_test.cpp b/test/unittest/components/v1_0/hdi_prepared_model/hdi_prepared_model_test.cpp similarity index 91% rename from test/unittest/components/hdi_prepared_model/hdi_prepared_model_test.cpp rename to test/unittest/components/v1_0/hdi_prepared_model/hdi_prepared_model_test.cpp index d946b6312390730ad7e02d588ad78aab8df8bcea..413fb5c64ff290022f33e3e4fc3e336c5b57e05e 100644 --- a/test/unittest/components/hdi_prepared_model/hdi_prepared_model_test.cpp +++ b/test/unittest/components/v1_0/hdi_prepared_model/hdi_prepared_model_test.cpp @@ -21,10 +21,10 @@ #include #include "common/log.h" -#include "frameworks/native/hdi_prepared_model.h" +#include "frameworks/native/hdi_prepared_model_v1_0.h" #include "frameworks/native/memory_manager.h" #include "frameworks/native/transform.h" -#include "test/unittest/common/mock_idevice.h" +#include "test/unittest/common/v1_0/mock_idevice.h" #include "test/unittest/common/file_utils.h" using namespace testing; @@ -81,7 +81,7 @@ OH_NN_ReturnCode HDIPreparedModelTest::Run(std::vector& inputs) OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(sp, nullptr); - std::unique_ptr preparedModel = std::make_unique(sp); + std::unique_ptr preparedModel = std::make_unique(sp); EXPECT_CALL(*sp, Run(::testing::_, ::testing::_, ::testing::_, ::testing::_)) .WillRepeatedly(::testing::DoAll( ::testing::SetArgReferee(outputsDims), @@ -104,7 +104,7 @@ HWTEST_F(HDIPreparedModelTest, 
hidpreparedmodel_constructor_001, TestSize.Level0 OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(hdiPreparedModel, nullptr); - std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); EXPECT_NE(preparedModel, nullptr); } @@ -118,7 +118,7 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_001, TestSize.L std::vector bufferVect = {{100, 100, 0, 100}}; OHOS::sptr hdiPreparedModel = OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); - std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); std::vector modelCache; EXPECT_CALL(*((V1_0::MockIPreparedModel*)hdiPreparedModel.GetRefPtr()), ExportModelCache(::testing::_)) @@ -145,7 +145,7 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_002, TestSize.L OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(mockPreparedModel, nullptr); - std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); + std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); std::vector modelCache; EXPECT_CALL(*((V1_0::MockIPreparedModel*)mockPreparedModel.GetRefPtr()), ExportModelCache(::testing::_)) @@ -171,7 +171,7 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_003, TestSize.L OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(hdiPreparedModel, nullptr); - std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); std::vector modelCache {{nullptr, 0}}; OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); @@ -189,7 +189,7 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_004, TestSize.L OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(mockPreparedModel, nullptr); - 
std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); + std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); std::vector modelCache; EXPECT_CALL(*((V1_0::MockIPreparedModel*)mockPreparedModel.GetRefPtr()), ExportModelCache(::testing::_)) @@ -231,7 +231,7 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_001, TestSize.Level0) OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(hdiPreparedModel, nullptr); - std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); } @@ -279,7 +279,7 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_003, TestSize.Level0) OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(sp, nullptr); - std::unique_ptr preparedModel = std::make_unique(sp); + std::unique_ptr preparedModel = std::make_unique(sp); EXPECT_CALL(*sp, Run(::testing::_, ::testing::_, ::testing::_, ::testing::_)) .WillRepeatedly( @@ -332,7 +332,7 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_005, TestSize.Level0) OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(sp, nullptr); - std::unique_ptr preparedModel = std::make_unique(sp); + std::unique_ptr preparedModel = std::make_unique(sp); OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); diff --git a/test/unittest/components/inner_model/inner_model_test.cpp b/test/unittest/components/v1_0/inner_model/inner_model_test.cpp similarity index 100% rename from test/unittest/components/inner_model/inner_model_test.cpp rename to test/unittest/components/v1_0/inner_model/inner_model_test.cpp diff --git a/test/unittest/components/inner_model/nn_tensor_test.cpp b/test/unittest/components/v1_0/inner_model/nn_tensor_test.cpp 
similarity index 100% rename from test/unittest/components/inner_model/nn_tensor_test.cpp rename to test/unittest/components/v1_0/inner_model/nn_tensor_test.cpp diff --git a/test/unittest/components/inner_model/nn_validation_test.cpp b/test/unittest/components/v1_0/inner_model/nn_validation_test.cpp similarity index 100% rename from test/unittest/components/inner_model/nn_validation_test.cpp rename to test/unittest/components/v1_0/inner_model/nn_validation_test.cpp diff --git a/test/unittest/components/inner_model/ops_regitstry_test.cpp b/test/unittest/components/v1_0/inner_model/ops_regitstry_test.cpp similarity index 100% rename from test/unittest/components/inner_model/ops_regitstry_test.cpp rename to test/unittest/components/v1_0/inner_model/ops_regitstry_test.cpp diff --git a/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.cpp b/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp similarity index 98% rename from test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.cpp rename to test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp index 404f2e80d118d23b3478d60fe8de3fcf551fadc1..c38eea6d866a97ccc2869d9c6f3bca15363d354c 100644 --- a/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.cpp +++ b/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp @@ -20,12 +20,12 @@ #include "common/utils.h" #include "frameworks/native/compilation.h" #include "frameworks/native/device_manager.h" -#include "frameworks/native/hdi_device.h" -#include "test/unittest/common/mock_idevice.h" +#include "frameworks/native/hdi_device_v1_0.h" +#include "test/unittest/common/v1_0/mock_idevice.h" namespace OHOS { namespace NeuralNetworkRuntime { -OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr model, +OH_NN_ReturnCode HDIDeviceV1_0::PrepareModel(std::shared_ptr model, const ModelConfig& 
config, std::shared_ptr& preparedModel) { @@ -40,11 +40,11 @@ OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr iPreparedModel = sptr(new OHOS::HDI::Nnrt::V1_0::MockIPreparedModel()); if (iPreparedModel == nullptr) { - LOGE("HDIDevice mock PrepareModel failed, error happened when new sptr"); + LOGE("HDIDeviceV1_0 mock PrepareModel failed, error happened when new sptr"); return OH_NN_NULL_PTR; } - preparedModel = CreateSharedPtr(iPreparedModel); + preparedModel = CreateSharedPtr(iPreparedModel); return OH_NN_SUCCESS; } @@ -57,7 +57,7 @@ std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const return nullptr; } - std::shared_ptr device = CreateSharedPtr(idevice); + std::shared_ptr device = CreateSharedPtr(idevice); if (device == nullptr) { LOGE("DeviceManager mock GetDevice failed, the device is nullptr"); return nullptr; @@ -71,7 +71,7 @@ std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const } } -OH_NN_ReturnCode HDIDevice::GetDeviceType(OH_NN_DeviceType& deviceType) +OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceType(OH_NN_DeviceType& deviceType) { if (deviceType == OH_NN_OTHERS) { return OH_NN_UNAVALIDABLE_DEVICE; @@ -104,35 +104,35 @@ const std::vector& DeviceManager::GetAllDeviceId() return deviceIds; } -OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsModelCacheSupported(bool& isSupported) { isSupported = true; return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsPerformanceModeSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsPerformanceModeSupported(bool& isSupported) { isSupported = true; return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsPrioritySupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsPrioritySupported(bool& isSupported) { isSupported = true; return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsFloat16PrecisionSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsFloat16PrecisionSupported(bool& isSupported) { isSupported = true; 
return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr model, +OH_NN_ReturnCode HDIDeviceV1_0::GetSupportedOperation(std::shared_ptr model, std::vector& ops) { if (model == nullptr) { - LOGE("HDIDevice mock GetSupportedOperation failed, Model is nullptr, cannot query supported operation."); + LOGE("HDIDeviceV1_0 mock GetSupportedOperation failed, Model is nullptr, cannot query supported operation."); return OH_NN_NULL_PTR; } @@ -140,7 +140,7 @@ OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr +#include #include "frameworks/native/transform.h" #include "frameworks/native/memory_manager.h" @@ -30,403 +31,6 @@ public: ~TransformTestTest() = default; }; -/** - * @tc.name: transform_transhdidevicetype_001 - * @tc.desc: Verify the TransHDIDeviceType function return OH_NN_CPU - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transhdidevicetype_001, TestSize.Level0) -{ - V1_0::DeviceType iDeviceType = V1_0::DeviceType::CPU; - OH_NN_DeviceType result = HDIToNN::TransHDIDeviceType(iDeviceType); - EXPECT_EQ(OH_NN_CPU, result); -} - -/** - * @tc.name: transform_transhdidevicetype_002 - * @tc.desc: Verify the TransHDIDeviceType function return OH_NN_GPU - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transhdidevicetype_002, TestSize.Level0) -{ - V1_0::DeviceType iDeviceType = V1_0::DeviceType::GPU; - OH_NN_DeviceType result = HDIToNN::TransHDIDeviceType(iDeviceType); - EXPECT_EQ(OH_NN_GPU, result); -} - -/** - * @tc.name: transform_transhdidevicetype_003 - * @tc.desc: Verify the TransHDIDeviceType function return OH_NN_ACCELERATOR - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transhdidevicetype_003, TestSize.Level0) -{ - V1_0::DeviceType iDeviceType = V1_0::DeviceType::ACCELERATOR; - OH_NN_DeviceType result = HDIToNN::TransHDIDeviceType(iDeviceType); - EXPECT_EQ(OH_NN_ACCELERATOR, result); -} - -/** - * @tc.name: transform_transhdidevicetype_004 - * @tc.desc: Verify the 
TransHDIDeviceType function return OH_NN_OTHERS - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transhdidevicetype_004, TestSize.Level0) -{ - V1_0::DeviceType iDeviceType = V1_0::DeviceType::OTHER; - OH_NN_DeviceType result = HDIToNN::TransHDIDeviceType(iDeviceType); - EXPECT_EQ(OH_NN_OTHERS, result); -} - -/** - * @tc.name: transform_transhdidevicestatus_001 - * @tc.desc: Verify the TransHDIDeviceStatus function return AVAILABLE - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transhdidevicestatus_001, TestSize.Level0) -{ - V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::AVAILABLE; - DeviceStatus result = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); - EXPECT_EQ(DeviceStatus::AVAILABLE, result); -} - -/** - * @tc.name: transform_transhdidevicestatus_002 - * @tc.desc: Verify the TransHDIDeviceStatus function return BUSY. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transhdidevicestatus_002, TestSize.Level0) -{ - V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::BUSY; - DeviceStatus result = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); - EXPECT_EQ(DeviceStatus::BUSY, result); -} - -/** - * @tc.name: transform_transhdidevicestatus_003 - * @tc.desc: Verify the TransHDIDeviceStatus function return OFFLINE. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transhdidevicestatus_003, TestSize.Level0) -{ - V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::OFFLINE; - DeviceStatus result = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); - EXPECT_EQ(DeviceStatus::OFFLINE, result); -} - -/** - * @tc.name: transform_transhdidevicestatus_004 - * @tc.desc: Verify the TransHDIDeviceStatus function return UNKNOWN. 
- * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transhdidevicestatus_004, TestSize.Level0) -{ - V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::UNKNOWN; - DeviceStatus result = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); - EXPECT_EQ(DeviceStatus::UNKNOWN, result); -} - -/** - * @tc.name: transform_transperformancemode_001 - * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_LOW. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transperformancemode_001, TestSize.Level0) -{ - OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_LOW; - V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); - EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_LOW, result); -} - -/** - * @tc.name: transform_transperformancemode_002 - * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_MEDIUM. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transperformancemode_002, TestSize.Level0) -{ - OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_MEDIUM; - V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); - EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_MEDIUM, result); -} - -/** - * @tc.name: transform_transperformancemode_003 - * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_HIGH. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transperformancemode_003, TestSize.Level0) -{ - OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_HIGH; - V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); - EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_HIGH, result); -} - -/** - * @tc.name: transform_transperformancemode_004 - * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_EXTREME. 
- * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transperformancemode_004, TestSize.Level0) -{ - OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_EXTREME; - V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); - EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_EXTREME, result); -} - -/** - * @tc.name: transform_transperformancemode_005 - * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_NONE. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transperformancemode_005, TestSize.Level0) -{ - OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_NONE; - V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); - EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_NONE, result); -} - -/** - * @tc.name: transform_transpriority_001 - * @tc.desc: Verify the TransPriority function return PRIORITY_LOW. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transpriority_001, TestSize.Level0) -{ - OH_NN_Priority priority = OH_NN_PRIORITY_LOW; - V1_0::Priority result = NNToHDI::TransPriority(priority); - EXPECT_EQ(V1_0::Priority::PRIORITY_LOW, result); -} - -/** - * @tc.name: transform_transpriority_002 - * @tc.desc: Verify the TransPriority function return PRIORITY_MEDIUM. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transpriority_002, TestSize.Level0) -{ - OH_NN_Priority priority = OH_NN_PRIORITY_MEDIUM; - V1_0::Priority result = NNToHDI::TransPriority(priority); - EXPECT_EQ(V1_0::Priority::PRIORITY_MEDIUM, result); -} - -/** - * @tc.name: transform_transpriority_003 - * @tc.desc: Verify the TransPriority function return PRIORITY_HIGH. 
- * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transpriority_003, TestSize.Level0) -{ - OH_NN_Priority priority = OH_NN_PRIORITY_HIGH; - V1_0::Priority result = NNToHDI::TransPriority(priority); - EXPECT_EQ(V1_0::Priority::PRIORITY_HIGH, result); -} - -/** - * @tc.name: transform_transdatatype_001 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_BOOL. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_001, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_BOOL; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_BOOL, result); -} - -/** - * @tc.name: transform_transdatatype_002 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT8. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_002, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_INT8; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT8, result); -} - -/** - * @tc.name: transform_transdatatype_003 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT16. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_003, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_INT16; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT16, result); -} - -/** - * @tc.name: transform_transdatatype_004 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT32. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_004, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_INT32; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT32, result); -} - -/** - * @tc.name: transform_transdatatype_005 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT64. 
- * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_005, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_INT64; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT64, result); -} - -/** - * @tc.name: transform_transdatatype_006 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT8. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_006, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_UINT8; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_UINT8, result); -} - -/** - * @tc.name: transform_transdatatype_007 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT16. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_007, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_UINT16; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_UINT16, result); -} - -/** - * @tc.name: transform_transdatatype_008 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT32. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_008, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_UINT32; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_UINT32, result); -} - -/** - * @tc.name: transform_transdatatype_009 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT64. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_009, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_UINT64; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_UINT64, result); -} - -/** - * @tc.name: transform_transdatatype_010 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT16. 
- * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_010, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_FLOAT16; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_FLOAT16, result); -} - -/** - * @tc.name: transform_transdatatype_011 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT32. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_011, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_FLOAT32; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_FLOAT32, result); -} - -/** - * @tc.name: transform_transdatatype_012 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_UNKNOWN. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_012, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_UNKNOWN; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_UNKNOWN, result); -} - -/** - * @tc.name: transform_transdatatype_013 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT64. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_013, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_FLOAT64; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_FLOAT64, result); -} - -/** - * @tc.name: transform_transformat_001 - * @tc.desc: Verify the TransFormat function return FORMAT_NCHW. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transformat_001, TestSize.Level0) -{ - OH_NN_Format format = OH_NN_FORMAT_NCHW; - V1_0::Format result = NNToHDI::TransFormat(format); - EXPECT_EQ(V1_0::Format::FORMAT_NCHW, result); -} - -/** - * @tc.name: transform_transformat_002 - * @tc.desc: Verify the TransFormat function return FORMAT_NHWC. 
- * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transformat_002, TestSize.Level0) -{ - OH_NN_Format format = OH_NN_FORMAT_NHWC; - V1_0::Format result = NNToHDI::TransFormat(format); - EXPECT_EQ(V1_0::Format::FORMAT_NHWC, result); -} - -/** - * @tc.name: transform_transformat_003 - * @tc.desc: Verify the TransFormat function return FORMAT_NONE. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transformat_003, TestSize.Level0) -{ - OH_NN_Format format = OH_NN_FORMAT_NONE; - V1_0::Format result = NNToHDI::TransFormat(format); - EXPECT_EQ(V1_0::Format::FORMAT_NONE, result); -} - -/** - * @tc.name: transform_transiotensor_001 - * @tc.desc: Verify the TransIOTensor function return int8 - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transiotensor_001, TestSize.Level0) -{ - IOTensor tensor; - tensor.dataType = OH_NN_INT8; - V1_0::IOTensor result = NNToHDI::TransIOTensor(tensor); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT8, result.dataType); -} - /** * @tc.name: transform_gettypesize_001 * @tc.desc: Verify the TransIOTensor function return 1. diff --git a/test/unittest/components/v2_0/compilation/compilation_test.cpp b/test/unittest/components/v2_0/compilation/compilation_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a4087b3c64a92e8d86564d1a60adaaa245bd73d6 --- /dev/null +++ b/test/unittest/components/v2_0/compilation/compilation_test.cpp @@ -0,0 +1,1143 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "compilation_test.h" + +#include + +#include "mindir.h" + +#include "test/unittest/common/v2_0/mock_idevice.h" + +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::HDI::Nnrt::V2_0; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +static const int DATA_VALUE = 1; +static const int DATA_NUM = 36; +static const int DIM_NUM = 3; +OH_NN_ReturnCode CompilationTest::BuildModelGraph(NeuralNetworkRuntime::InnerModel& innerModel) +{ + // liteGraph is released internally by innerModel + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph; + EXPECT_NE(nullptr, liteGraph); + + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_ = {0}; + liteGraph->output_indices_ = {1}; + liteGraph->all_tensors_ = {nullptr}; + const std::vector quant_params {}; + const std::vector data(DATA_NUM, DATA_VALUE); + const std::vector dim = {DIM_NUM, DIM_NUM}; + + for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) { + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) { + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + OH_NN_ReturnCode ret = innerModel.BuildFromLiteGraph(liteGraph); + return ret; +} + +void CompilationTest::SetConfig(Compilation& compilationTest) +{ + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetPerformance(OH_NN_PERFORMANCE_EXTREME)); + EXPECT_EQ(OH_NN_SUCCESS, 
compilationTest.SetPriority(OH_NN_PRIORITY_HIGH)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); +} + +void CompilationTest::WriteFile(uint64_t version, uint64_t fileNumber, std::size_t cacheDeviceId) +{ + uint64_t cacheSize = 4; + uint64_t writeSize = 7; + uint64_t cacheInfo[7] = {}; + auto cacheInfoPtr = cacheInfo; + *cacheInfoPtr++ = fileNumber; + *cacheInfoPtr++ = version; + *cacheInfoPtr++ = cacheDeviceId; + for (uint64_t i = 0; i < cacheSize; ++i) { + *cacheInfoPtr++ = i; + } + std::ofstream inFile("cache_info.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + inFile.write(reinterpret_cast(cacheInfo), writeSize * sizeof(uint64_t)); + inFile.close(); +} + +void CompilationTest::BuildCompilation(InnerModel& innerModel) +{ + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); +} + +/* + * @tc.name: compilation_set_device_001 + * @tc.desc: Verify the set deviceId after compilation finish of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_device_002 + * @tc.desc: Verify the deviceId does not exist of the SetDevice function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + size_t deviceId = 0; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_device_003 + * @tc.desc: Verify the error happened when getting supported operation of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_NULL_PTR, ret); +} + +/* + * @tc.name: compilation_set_device_004 + * @tc.desc: Verify the current device not support the model of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_device_005 + * @tc.desc: Verify the error happened when checking whether device supports dynamic input of the SetDevice function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_device_006 + * @tc.desc: Verify the device does not support dynamic shape inputs of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PATH; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_device_007 + * @tc.desc: Verify the set normal deviceId of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_007, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_cachedir_001 + * @tc.desc: Verify the set cache after compilation finish of the SetCacheDir function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_cachedir_002 + * @tc.desc: Verify the not set device of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_cachedir_003 + * @tc.desc: Verify the Fail to query whether the device is available to save cache model of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_cachedir_004 + * @tc.desc: Verify the device is unavailable to save cache model of the SetCacheDir function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_cachedir_005 + * @tc.desc: Verify the cache model path is invalid of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../compilation_test.cpp", 1); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_cachedir_006 + * @tc.desc: Verify the cache model path is not a directory of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("./CompilationTest", 1); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_cachedir_007 + * @tc.desc: Verify the success of the SetCacheDir function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_007, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_performance_001 + * @tc.desc: Verify the set performance after compilation finish of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_performance_002 + * @tc.desc: Verify the set performance before set device of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_performance_003 + * @tc.desc: Verify the call device failed of the SetPerformance function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_performance_004 + * @tc.desc: Verify the device is not support performance setting of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_performance_005 + * @tc.desc: Verify the passed invalid performance of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_PerformanceMode performance = static_cast<OH_NN_PerformanceMode>(5); + OH_NN_ReturnCode ret = compilationTest.SetPerformance(performance); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_performance_006 + * @tc.desc: Verify the success of the SetPerformance function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_priority_001 + * @tc.desc: Verify the set priority after compilation finish of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_priority_002 + * @tc.desc: Verify the set priority before set device of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_priority_003 + * @tc.desc: Verify the call device failed of the SetPriority function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_priority_004 + * @tc.desc: Verify the device is not support priority setting of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_priority_005 + * @tc.desc: Verify the passed invalid priority of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_Priority priority = static_cast<OH_NN_Priority>(5); + OH_NN_ReturnCode ret = compilationTest.SetPriority(priority); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_priority_006 + * @tc.desc: Verify the success of the SetPriority function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_enable_fp16_001 + * @tc.desc: Verify the enable float16 after compilation finish of the SetEnableFp16 function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_enable_fp16_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_enable_fp16_002 + * @tc.desc: Verify the set enable fp16 before set device of the SetEnableFp16 function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_enable_fp16_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + + OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_enable_fp16_003 + * @tc.desc: Verify the call device failed of the SetEnableFp16 function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_enable_fp16_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_MEMORY_ERROR; + OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: compilation_set_enable_fp16_004 + * @tc.desc: Verify the device is not support float16 precision setting of the SetEnableFp16 function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_enable_fp16_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_enable_fp16_005 + * @tc.desc: Verify the success of the SetEnableFp16 function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_enable_fp16_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_get_input_tensors_001 + * @tc.desc: Verify the normal input tensors of the GetInputTensors function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_get_input_tensors_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + EXPECT_EQ(innerModel.GetInputTensors(), compilationTest.GetInputTensors()); +} + +/* + * @tc.name: compilation_get_output_tensors_001 + * @tc.desc: Verify the normal output tensors of the GetOutputTensors function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_get_output_tensors_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + EXPECT_EQ(innerModel.GetOutputTensors(), compilationTest.GetOutputTensors()); +} + +/* + * @tc.name: compilation_get_execution_plan_001 + * @tc.desc: Verify the passed nullptr of the GetExecutionPlan function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_get_execution_plan_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + EXPECT_EQ(nullptr, compilationTest.GetExecutionPlan()); +} + +/* + * @tc.name: compilation_is_dynamic_shape_001 + * @tc.desc: Verify the input tensor is empty of the IsDynamicShape function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_is_dynamic_shape_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + EXPECT_EQ(false, compilationTest.IsDynamicShape()); +} + +/* + * @tc.name: compilation_is_dynamic_shape_002 + * @tc.desc: Verify the return true of the IsDynamicShape function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_is_dynamic_shape_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + EXPECT_EQ(true, compilationTest.IsDynamicShape()); +} + +/* + * @tc.name: compilation_is_dynamic_shape_003 + * @tc.desc: Verify the return false of the IsDynamicShape function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_is_dynamic_shape_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + EXPECT_EQ(false, compilationTest.IsDynamicShape()); +} + +/* + * @tc.name: compilation_is_build_001 + * @tc.desc: Verify return false of the IsBuild function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_is_build_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + EXPECT_EQ(false, compilationTest.IsBuild()); +} + +/* + * @tc.name: compilation_build_001 + * @tc.desc: Verify the build after compilation finish of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_build_002 + * @tc.desc: Verify the not set device of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_build_003 + * @tc.desc: Verify the preparing model failed of the Build function without set cache path. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_FILE; + SetConfig(compilationTest); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_004 + * @tc.desc: Verify the success of the Build function without set cache path. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_build_005 + * @tc.desc: Verify the preparing model failed of the Build function without cache file. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_FILE; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_006 + * @tc.desc: Verify the export model cache failed of the Build function without cache file. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_build_007 + * @tc.desc: Verify the model cache file is invalid to generating cache mode of the Build function without cache file. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_007, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("/sys", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_008 + * @tc.desc: Verify the success to generating cache mode of the Build function without cache file. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_008, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_build_009 + * @tc.desc: Verify the Fail to get the content of info cache file of the Build. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_009, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + std::ofstream createFile("cache_info.nncache"); + createFile.close(); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_010 + * @tc.desc: Verify the deviceId in the cache files is different from current deviceId of the Build function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_010, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + WriteFile(1, 4, 2); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_011 + * @tc.desc: Verify the info cache file has been changed of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_011, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + WriteFile(1, 100, 1); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_012 + * @tc.desc: Verify the Preparing model failed of the Build function model version is greater than cached version. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_012, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_FILE; + SetConfig(compilationTest); + WriteFile(0, 4, 1); + + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + std::ofstream inFile("0.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + inFile.close(); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_013 + * @tc.desc: Verify that the build function return success message with model version is greater than cached version + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_013, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + WriteFile(0, 1, 1); + + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + std::ofstream inFile("0.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + inFile.close(); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_build_014 + * @tc.desc: Verify the model version is less than version cache of the Build function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_014, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + WriteFile(3, 4, 1); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_build_015 + * @tc.desc: Verify the checking cache model failed of the Build function with release buffer. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_015, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + EXPECT_EQ(0, remove("1.nncache")); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_016 + * @tc.desc: Verify the get cache file length of the Build function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_016, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + std::ofstream inFile("0.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + inFile.close(); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_017 + * @tc.desc: Verify the fail to create file buffer of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_017, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_NULL_PTR; + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_NULL_PTR, ret); +} + +/* + * @tc.name: compilation_build_018 + * @tc.desc: Verify the cache model file has been changed of the Build function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_018, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + uint64_t version = 1; + uint64_t fileNumber = 1; + std::size_t cacheDeviceId = 1; + uint64_t cacheInfo[7] = {}; + auto cacheInfoPtr = cacheInfo; + *cacheInfoPtr++ = fileNumber; + *cacheInfoPtr++ = version; + *cacheInfoPtr++ = cacheDeviceId; + for (uint64_t i = 0; i < 4; ++i) { + *cacheInfoPtr++ = i; + } + + std::ofstream onFile("0.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + onFile.write(reinterpret_cast<const char*>(cacheInfo), 7 * sizeof(uint64_t)); + onFile.close(); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_019 + * @tc.desc: Verify the preparing model from cache failed of the Build function with load cache build. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_019, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_build_020 + * @tc.desc: Verify the success of the Build function with load cache build. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_020, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/v2_0/compilation/compilation_test.h b/test/unittest/components/v2_0/compilation/compilation_test.h new file mode 100644 index 0000000000000000000000000000000000000000..8217f4f3acec605c1dd10cb2b198b180c38ce8bd --- /dev/null +++ b/test/unittest/components/v2_0/compilation/compilation_test.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_COMPILATION_UNITTEST_H +#define NEURAL_NETWORK_RUNTIME_COMPILATION_UNITTEST_H + +#include + +#include "frameworks/native/compilation.h" +#include "frameworks/native/inner_model.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class CompilationTest : public testing::Test { +public: + OH_NN_ReturnCode BuildModelGraph(NeuralNetworkRuntime::InnerModel& innerModel); + void SetConfig(Compilation& compilationTest); + void WriteFile(uint64_t version, uint64_t fileNumber, std::size_t cacheDeviceId); + void BuildCompilation(InnerModel& innerModel); +}; +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_COMPILATION_UNITTEST_H \ No newline at end of file diff --git a/test/unittest/components/v2_0/device_manager/device_manager_test.cpp b/test/unittest/components/v2_0/device_manager/device_manager_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3dea507d873095541b16b66b49ec97d25cac7cd9 --- /dev/null +++ b/test/unittest/components/v2_0/device_manager/device_manager_test.cpp @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include "common/log.h" +#include "frameworks/native/device_manager.h" +#include "frameworks/native/hdi_device_v2_0.h" +#include "test/unittest/common/v2_0/mock_idevice.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class DeviceManagerTest : public testing::Test { +protected: + void MockInit(OHOS::sptr device, const std::vector& typeVect, + const std::string& deviceName, const std::string& vendorName); +}; + +void DeviceManagerTest::MockInit(OHOS::sptr device, const std::vector& typeVect, + const std::string& deviceName, const std::string& vendorName) +{ + const size_t typeSize = 4; + int index = 0; + EXPECT_EQ(typeSize, typeVect.size()); + EXPECT_CALL(*device, GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), + ::testing::Return(typeVect[index++]))); + + EXPECT_CALL(*device, GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), + ::testing::Return(typeVect[index++]))); + + V2_0::DeviceStatus deviceStatus = V2_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*device, GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceStatus), + ::testing::Return(typeVect[index++]))); + + uint32_t majorVer = 1; + uint32_t minorVer = 0; + EXPECT_CALL(*device, GetVersion(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(majorVer), ::testing::SetArgReferee<1>(minorVer), + ::testing::Return(typeVect[index++]))); +} + +/** + * @tc.name: devicemanager_getalldeviceid_001 + * @tc.desc: Verify the GetAllDeviceId function return deviceid list is not null. 
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_getalldeviceid_001, TestSize.Level0) +{ + auto &deviceManager = DeviceManager::GetInstance(); + std::vector idVect = deviceManager.GetAllDeviceId(); + EXPECT_NE((size_t)0, idVect.size()); + + const size_t expectDeviceId {std::hash{}("MockDevice_MockVendor")}; + EXPECT_EQ(expectDeviceId, idVect[0]); + + const std::string expectDeviceName = "MockDevice"; + std::string deviceName = ""; + std::shared_ptr retDevice = deviceManager.GetDevice(idVect[0]); + retDevice->GetDeviceName(deviceName); + EXPECT_EQ(deviceName, expectDeviceName); +} + +/** + * @tc.name: devicemanager_getdevice_001 + * @tc.desc: Verify the GetDevice function return nullptr in case of deviceId invalid. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_getdevice_001, TestSize.Level0) +{ + auto &deviceManager = DeviceManager::GetInstance(); + const size_t deviceId = 1; + std::shared_ptr result = deviceManager.GetDevice(deviceId); + EXPECT_EQ(nullptr, result); +} + +/** + * @tc.name: devicemanager_getdevice_002 + * @tc.desc: Verify the GetDevice function validate device name return specified device name. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_getdevice_002, TestSize.Level0) +{ + auto &deviceManager = DeviceManager::GetInstance(); + std::vector idVect = deviceManager.GetAllDeviceId(); + EXPECT_EQ((size_t)1, idVect.size()); + size_t deviceId = idVect[0]; + std::shared_ptr result = deviceManager.GetDevice(deviceId); + EXPECT_NE(nullptr, result); + + const std::string expectDeviceNameA = "MockDevice"; + std::string deviceName = ""; + result->GetDeviceName(deviceName); + EXPECT_EQ(deviceName, expectDeviceNameA); +} + +/** + * @tc.name: devicemanager_registerdevice_001 + * @tc.desc: Verify the RegisterDevice function register repeatly. 
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_001, TestSize.Level0) +{ + std::vector typeVect = {HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS}; + OHOS::sptr device = OHOS::sptr(new (std::nothrow) V2_0::MockIDevice()); + EXPECT_NE(device.GetRefPtr(), nullptr); + + std::string deviceName = "MockDevice"; + std::string vendorName = "MockVendor"; + MockInit(device, typeVect, deviceName, vendorName); + + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; + auto& deviceManager = DeviceManager::GetInstance(); + OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); + EXPECT_EQ(OH_NN_FAILED, result); +} + +/** + * @tc.name: devicemanager_registerdevice_002 + * @tc.desc: Verify the RegisterDevice function return invalid parameter. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_002, TestSize.Level0) +{ + std::function()> creator = + []()->std::shared_ptr {return nullptr;}; + auto& deviceManager = DeviceManager::GetInstance(); + OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: devicemanager_registerdevice_003 + * @tc.desc: Verify the RegisterDevice function return unavailable device in case of device name invalid param. 
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_003, TestSize.Level0) +{ + std::vector typeVect = {HDF_FAILURE, HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS}; + OHOS::sptr device = OHOS::sptr(new (std::nothrow) V2_0::MockIDevice()); + EXPECT_NE(device.GetRefPtr(), nullptr); + + std::string deviceName = "MockDevice"; + std::string vendorName = "MockVendor"; + MockInit(device, typeVect, deviceName, vendorName); + + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; + auto& deviceManager = DeviceManager::GetInstance(); + OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/** + * @tc.name: devicemanager_registerdevice_004 + * @tc.desc: Verify the RegisterDevice function return unavailable device in case of vendor name failure. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_004, TestSize.Level0) +{ + std::vector typeVect = {HDF_SUCCESS, HDF_FAILURE, HDF_SUCCESS, HDF_SUCCESS}; + OHOS::sptr device = OHOS::sptr(new (std::nothrow) V2_0::MockIDevice()); + EXPECT_NE(device.GetRefPtr(), nullptr); + + std::string deviceName = "MockDevice"; + std::string vendorName = "MockVendor"; + MockInit(device, typeVect, deviceName, vendorName); + + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; + auto& deviceManager = DeviceManager::GetInstance(); + OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/** + * @tc.name: devicemanager_registerdevice_005 + * @tc.desc: Verify the RegisterDevice function return success. 
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_005, TestSize.Level0) +{ + std::vector typeVect = {HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS}; + OHOS::sptr device = OHOS::sptr(new (std::nothrow) V2_0::MockIDevice()); + EXPECT_NE(device.GetRefPtr(), nullptr); + + std::string deviceName = "MockDeviceA"; + std::string vendorName = "MockVendorA"; + MockInit(device, typeVect, deviceName, vendorName); + + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; + auto& deviceManager = DeviceManager::GetInstance(); + OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); + EXPECT_EQ(OH_NN_SUCCESS, result); + + std::vector idVect = deviceManager.GetAllDeviceId(); + EXPECT_NE((size_t)0, idVect.size()); + + const size_t expectDeviceId {std::hash{}("MockDeviceA_MockVendorA")}; + EXPECT_EQ(expectDeviceId, idVect[0]); + + const std::string expectDeviceName = "MockDeviceA_MockVendorA"; + const std::string retDeviceName = deviceManager.GetDeviceName(idVect[0]); + EXPECT_EQ(retDeviceName, expectDeviceName); +} + +/** + * @tc.name: devicemanager_getdevicename_001 + * @tc.desc: Verify the GetDevice function return empty string in case of deviceid invalid. 
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_getdevicename_001, TestSize.Level0) +{ + auto &deviceManager = DeviceManager::GetInstance(); + const size_t deviceId = 1; + std::string result = deviceManager.GetDeviceName(deviceId); + EXPECT_EQ("", result); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/v2_0/device_registrar/device_registrar_test.cpp b/test/unittest/components/v2_0/device_registrar/device_registrar_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..082de403b5023fade43ad258d33bab9ea51c9203 --- /dev/null +++ b/test/unittest/components/v2_0/device_registrar/device_registrar_test.cpp @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include + +#include "common/log.h" +#include "frameworks/native/device_registrar.h" +#include "frameworks/native/hdi_device_v2_0.h" +#include "frameworks/native/device_manager.h" +#include "test/unittest/common/v2_0/mock_idevice.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class IRegisterDevice : public HDI::HdiBase { +public: + DECLARE_HDI_DESCRIPTOR(u"ohos.hdi.nnrt.v2_0.IRegisterDevice"); + + virtual ~IRegisterDevice() = default; + + static sptr Get(bool isStub = false); + static sptr Get(const std::string& serviceName, bool isStub = false); + + virtual int32_t GetDeviceName(std::string& name) = 0; + + virtual int32_t GetVendorName(std::string& name) = 0; + + virtual int32_t GetDeviceType(V2_0::DeviceType& deviceType) = 0; + + virtual int32_t GetDeviceStatus(V2_0::DeviceStatus& status) = 0; + + virtual int32_t GetSupportedOperation(const V2_0::Model& model, std::vector& ops) = 0; + + virtual int32_t IsFloat16PrecisionSupported(bool& isSupported) = 0; + + virtual int32_t IsPerformanceModeSupported(bool& isSupported) = 0; + + virtual int32_t IsPrioritySupported(bool& isSupported) = 0; + + virtual int32_t IsDynamicInputSupported(bool& isSupported) = 0; + + virtual int32_t PrepareModel(const V2_0::Model& model, const V2_0::ModelConfig& config, + sptr& preparedModel) = 0; + + virtual int32_t IsModelCacheSupported(bool& isSupported) = 0; + + virtual int32_t PrepareModelFromModelCache(const std::vector& modelCache, + const V2_0::ModelConfig& config, sptr& preparedModel) = 0; + + virtual int32_t AllocateBuffer(uint32_t length, V2_0::SharedBuffer& buffer) = 0; + + virtual int32_t ReleaseBuffer(const V2_0::SharedBuffer& buffer) = 0; + + virtual int32_t GetVersion(uint32_t& majorVer, uint32_t& minorVer) + { + majorVer = INNRT_DEVICE_MAJOR_VERSION; + minorVer = INNRT_DEVICE_MINOR_VERSION; + return 
HDF_SUCCESS; + } +}; + +class SimulationDevice : public Device { +public: + explicit SimulationDevice(OHOS::sptr device) {}; + + OH_NN_ReturnCode GetDeviceName(std::string& name) override + { + name = "MockIDeviceA"; + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode GetVendorName(std::string& name) override + { + name = "MockVendorA"; + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode GetVersion(std::string& version) override + { + version = "MockVersionA"; + return OH_NN_SUCCESS; + } + OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override + { + status = DeviceStatus::AVAILABLE; + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, + std::vector& ops) override + { + return OH_NN_SUCCESS; + }; + + OH_NN_ReturnCode IsFloat16PrecisionSupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode IsPerformanceModeSupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode IsPrioritySupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode IsDynamicInputSupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode IsModelCacheSupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + + OH_NN_ReturnCode PrepareModel(std::shared_ptr model, const ModelConfig& config, + std::shared_ptr& preparedModel) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, + std::shared_ptr& preparedModel) override + { + return OH_NN_SUCCESS; + }; + + void *AllocateBuffer(size_t length) override + { + return nullptr; + }; + OH_NN_ReturnCode ReleaseBuffer(const void* buffer) override + { + return OH_NN_SUCCESS; + }; +}; + +class MockIDeviceImp : public IRegisterDevice { +public: + MOCK_METHOD1(GetDeviceName, 
int32_t(std::string&)); + MOCK_METHOD1(GetVendorName, int32_t(std::string&)); + MOCK_METHOD1(GetDeviceType, int32_t(V2_0::DeviceType&)); + MOCK_METHOD1(GetDeviceStatus, int32_t(V2_0::DeviceStatus&)); + MOCK_METHOD2(GetSupportedOperation, int32_t(const V2_0::Model&, std::vector&)); + MOCK_METHOD1(IsFloat16PrecisionSupported, int32_t(bool&)); + MOCK_METHOD1(IsPerformanceModeSupported, int32_t(bool&)); + MOCK_METHOD1(IsPrioritySupported, int32_t(bool&)); + MOCK_METHOD1(IsDynamicInputSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModel, + int32_t(const V2_0::Model&, const V2_0::ModelConfig&, OHOS::sptr&)); + MOCK_METHOD1(IsModelCacheSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModelFromModelCache, int32_t(const std::vector&, const V2_0::ModelConfig&, + OHOS::sptr&)); + MOCK_METHOD2(AllocateBuffer, int32_t(uint32_t, V2_0::SharedBuffer&)); + MOCK_METHOD1(ReleaseBuffer, int32_t(const V2_0::SharedBuffer&)); + MOCK_METHOD2(GetVersion, int32_t(uint32_t&, uint32_t&)); +}; + +sptr IRegisterDevice::Get(bool isStub) +{ + return IRegisterDevice::Get("device_service", isStub); +} + +sptr IRegisterDevice::Get(const std::string& serviceName, bool isStub) +{ + if (isStub) { + return nullptr; + } + + sptr mockIDevice = sptr(new (std::nothrow) MockIDeviceImp()); + if (mockIDevice.GetRefPtr() == nullptr) { + LOGE("Failed to new MockIDeviceImp object."); + return nullptr; + } + + std::string deviceName = "MockIDeviceA"; + EXPECT_CALL(*((MockIDeviceImp *)mockIDevice.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_SUCCESS))); + + std::string vendorName = "MockVendorA"; + EXPECT_CALL(*((MockIDeviceImp *)mockIDevice.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_SUCCESS))); + + V2_0::DeviceStatus deviceStatus = V2_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*((MockIDeviceImp *)mockIDevice.GetRefPtr()), 
GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceStatus), ::testing::Return(HDF_SUCCESS))); + return mockIDevice; +} + +class DeviceRegistrarTest : public testing::Test { +public: + DeviceRegistrarTest() = default; + ~DeviceRegistrarTest() = default; +}; + +std::shared_ptr CreateDeviceObjectCallback() +{ + OHOS::sptr device = IRegisterDevice::Get(false); + EXPECT_NE(device, nullptr); + std::shared_ptr m_mockDevice = std::make_shared(device); + return m_mockDevice; +} + +std::shared_ptr CreateNullObjectCallback() +{ + return nullptr; +} + +/* * + * @tc.name: devicemanager_getalldeviceid_001 + * @tc.desc: Verify the Constructor function register object success. + * @tc.type: FUNC + */ +HWTEST_F(DeviceRegistrarTest, deviceregistrar_constructor_001, TestSize.Level0) +{ + CreateDevice creator = CreateDeviceObjectCallback; + std::unique_ptr deviceRegister = std::make_unique(creator); + EXPECT_NE(deviceRegister, nullptr); + auto &deviceManager = DeviceManager::GetInstance(); + std::vector idVect = deviceManager.GetAllDeviceId(); + EXPECT_EQ((size_t)2, idVect.size()); + + const size_t expectDeviceId {std::hash{}("MockDevice_MockVendor")}; + EXPECT_EQ(expectDeviceId, idVect[1]); + + const std::string expectDeviceNameA = "MockDevice"; + std::string deviceName = ""; + std::shared_ptr retDevice = deviceManager.GetDevice(idVect[1]); + retDevice->GetDeviceName(deviceName); + EXPECT_EQ(deviceName, expectDeviceNameA); + + const std::string expectDeviceNameB = "MockDevice_MockVendor"; + std::string queryDeviceName = deviceManager.GetDeviceName(idVect[1]); + EXPECT_EQ(queryDeviceName, expectDeviceNameB); +} + +/* * + * @tc.name: devicemanager_getalldeviceid_002 + * @tc.desc: Verify the Constructor function register object creator return nullptr, used for branch coverage. 
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceRegistrarTest, deviceregistrar_constructor_002, TestSize.Level0) +{ + CreateDevice creator = CreateNullObjectCallback; + std::unique_ptr deviceRegister = std::make_unique(creator); + EXPECT_NE(deviceRegister, nullptr); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/v2_0/executor/executor_test.cpp b/test/unittest/components/v2_0/executor/executor_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..19b6f3fd0b0bc88fe83dcdde90dd412ac1d528e4 --- /dev/null +++ b/test/unittest/components/v2_0/executor/executor_test.cpp @@ -0,0 +1,1206 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "executor_test.h" + +#include "common/scoped_trace.h" +#include "frameworks/native/compilation.h" +#include "frameworks/native/inner_model.h" +#include "test/unittest/common/v2_0/mock_idevice.h" + +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Ops; +using namespace OHOS::HDI::Nnrt::V2_0; +using namespace OHOS::HiviewDFX; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +using NNTensorPtr = std::shared_ptr; + +MSLITE::LiteGraph* ExecutorTest::BuildLiteGraph(const std::vector dim, const std::vector dimOut) +{ + MSLITE::LiteGraph* liteGraph = new (std::nothrow) MSLITE::LiteGraph(); + if (liteGraph == nullptr) { + LOGE("liteGraph build failed"); + return nullptr; + } + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_.emplace_back(0); + liteGraph->output_indices_.emplace_back(1); + const std::vector quant_params; + + for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) { + const std::vector data(36, 1); + void* liteGraphTensor1 = MSLITE::MindIR_Tensor_Create(liteGraph->name_, + MSLITE::DATA_TYPE_FLOAT32, dim, MSLITE::FORMAT_NCHW, data, quant_params); + liteGraph->all_tensors_.emplace_back(liteGraphTensor1); + } + + for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) { + const std::vector dataOut(36, 1); + void* liteGraphTensor2 = MSLITE::MindIR_Tensor_Create(liteGraph->name_, + MSLITE::DATA_TYPE_FLOAT32, dimOut, MSLITE::FORMAT_NCHW, dataOut, quant_params); + liteGraph->all_tensors_.emplace_back(liteGraphTensor2); + } + + return liteGraph; +} + +OH_NN_Tensor ExecutorTest::SetTensor(OH_NN_DataType dataType, uint32_t dimensionCount, const int32_t *dimensions, + const OH_NN_QuantParam *quantParam, OH_NN_TensorType type) +{ + OH_NN_Tensor tensor; + tensor.dataType = dataType; + tensor.dimensionCount = dimensionCount; + tensor.dimensions = dimensions; + tensor.quantParam = quantParam; + tensor.type = 
type; + + return tensor; +} + +void ExecutorTest::SetMermory(OH_NN_Memory** &memory) +{ + float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)}; + OH_NN_Memory* ptr = &memoryPtr; + memory = &ptr; +} + +/* + * @tc.name: executor_set_input_001 + * @tc.desc: Verify that the SetInput function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_input_002 + * @tc.desc: Verify that the SetInput function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_003 + * @tc.desc: Verify that the SetInput function returns a failed message with dynamic shape. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + const int dim = -1; + m_dimensionCount = 1; + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, &dim, nullptr, OH_NN_TENSOR); + size_t length = 1 * sizeof(float); + float data = 0; + void* buffer = &data; + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_004 + * @tc.desc: Verify that the SetInput function returns a failed message with invalid tensor's dataType. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_004, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_INT64, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_005 + * @tc.desc: Verify that the SetInput function returns a failed message with invalid length. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_005, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + size_t length = 1 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_006 + * @tc.desc: Verify that the SetInput function returns a failed message with allocating buffer is unsuccessfully. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_006, testing::ext::TestSize.Level0) +{ + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_set_input_007 + * @tc.desc: Verify that the SetInput function returns a failed message with empty buffer. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_007, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = nullptr; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_set_input_008 + * @tc.desc: Verify that the SetInput function returns a successful message with dataLength <= curBufferLength. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_008, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + float dataArry[15] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}; + void* buffer = dataArry; + size_t length = 9 * sizeof(float); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + + float expectArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* expectBuffer = expectArry; + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, expectBuffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_input_009 + * @tc.desc: Verify that the SetInput function returns a failed message with length less than dataLength. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_009, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInputFromMemory(m_index, tensor, memory)); + + float expectData = 0; + void* buffer = &expectData; + size_t length = 1 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_010 + * @tc.desc: Verify that the SetInput function returns a failed message with BuildFromOHNNTensor unsuccessfully. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_010, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_dimensionCount = 0; + OH_NN_Tensor tensor = SetTensor(OH_NN_UNKNOWN, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_011 + * @tc.desc: Verify that the SetInput function returns a successful message with dataLength <= curBufferLength. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_011, testing::ext::TestSize.Level0) +{ + const std::vector expectDim = {3, -1}; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(expectDim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + + const int32_t testDim[2] = {3, 5}; + OH_NN_Tensor expectTensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, testDim, nullptr, OH_NN_TENSOR); + size_t expectLength = 15 * sizeof(float); + float expectArry[15] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}; + void* expectBuffer = expectArry; + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, expectTensor, expectBuffer, expectLength); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_001 + * @tc.desc: Verify that the SetInputFromMemory function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_002 + * @tc.desc: Verify that the SetInputFromMemory function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_003 + * @tc.desc: Verify that the SetInputFromMemory function returns a failed message with dynamic shape. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, executor_set_input_from_memory_003, testing::ext::TestSize.Level0)
+{
+    const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut);
+    InnerModel innerModel;
+    innerModel.BuildFromLiteGraph(liteGraphTest);
+    Compilation compilation(&innerModel);
+    Executor executorTest(&compilation);
+
+    const int dim = -1;
+    OH_NN_Tensor tensor;
+    tensor.dataType = OH_NN_FLOAT32;
+    tensor.dimensionCount = 1;
+    tensor.dimensions = &dim;
+    tensor.quantParam = nullptr;
+    tensor.type = OH_NN_TENSOR;
+    float value = 0;
+    void* const data = &value;
+    OH_NN_Memory memory = {data, 1 * sizeof(float)};
+
+    OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: executor_set_input_from_memory_004
+ * @tc.desc: Verify that the SetInputFromMemory function returns a failed message with invalid tensor's dataType.
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, executor_set_input_from_memory_004, testing::ext::TestSize.Level0)
+{
+    const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut);
+    InnerModel innerModel;
+    innerModel.BuildFromLiteGraph(liteGraphTest);
+    Compilation compilation(&innerModel);
+    Executor executorTest(&compilation);
+
+    OH_NN_Tensor tensor = SetTensor(OH_NN_INT64, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
+    void* const data = m_dataArry;
+    OH_NN_Memory memory = {data, 9 * sizeof(float)};
+
+    OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: executor_set_input_from_memory_005
+ * @tc.desc: Verify that the SetInputFromMemory function returns a failed message with invalid memory.length.
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_005, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 1 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_001 + * @tc.desc: Verify that the SetOutput function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_output_002 + * @tc.desc: Verify that the SetOutput function returns a failed message with out-of-range index. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, executor_set_output_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    Compilation compilation(&innerModel);
+    Executor executorTest(&compilation);
+
+    m_index = 6;
+    size_t length = 9 * sizeof(float);
+    void* buffer = m_dataArry;
+
+    OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: executor_set_output_003
+ * @tc.desc: Verify that the SetOutput function returns a failed message with invalid length.
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, executor_set_output_003, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    Compilation compilation(&innerModel);
+    Executor executorTest(&compilation);
+
+    size_t length = 2 * sizeof(float);
+    void* buffer = m_dataArry;
+
+    OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: executor_set_output_004
+ * @tc.desc: Verify that the SetOutput function returns a failed message with allocating buffer is failed.
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, executor_set_output_004, testing::ext::TestSize.Level0)
+{
+    HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER;
+    const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut);
+    InnerModel innerModel;
+    innerModel.BuildFromLiteGraph(liteGraphTest);
+    Compilation compilation(&innerModel);
+    Executor executorTest(&compilation);
+
+    size_t length = 9 * sizeof(float);
+    void* buffer = m_dataArry;
+
+    OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length);
+    EXPECT_EQ(OH_NN_MEMORY_ERROR, ret);
+}
+
+/*
+ * @tc.name: executor_set_output_005
+ * @tc.desc: Verify that the SetOutput function returns a failed message with length less than dataLength.
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_005, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutputFromMemory(m_index, memory)); + + size_t length = 1 * sizeof(float); + float expectData = 0; + void* buffer = &expectData; + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_006 + * @tc.desc: Verify that the SetOutput function returns a successful message with length <= curBufferLength. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_006, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + + float expectDataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* expectBuffer = expectDataArry; + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, expectBuffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_output_007 + * @tc.desc: Verify that the SetOutput function returns a successful message with length > curBufferLength. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_007, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + + size_t expectLength = 15 * sizeof(float); + float expectDataArry[15] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}; + void* expectBuffer = expectDataArry; + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, expectBuffer, expectLength); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_001 + * @tc.desc: Verify that the SetOutputFromMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_002 + * @tc.desc: Verify that the SetOutputFromMemory function returns a failed message with out-of-range index. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_003 + * @tc.desc: Verify that the SetOutputFromMemory function returns a failed message with invalid memory.length. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 0}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_004 + * @tc.desc: Verify that the SetOutputFromMemory function returns a failed message with memory.length < dataLength. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_004, testing::ext::TestSize.Level0) +{ + const std::vector expectDim = {4, 4}; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, expectDim); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_005 + * @tc.desc: Verify that the SetOutputFromMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_005, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_get_output_dimensions_001 + * @tc.desc: Verify that the GetOutputShape function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_get_output_dimensions_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.Run()); + + int32_t expectDim[2] = {3, 3}; + int32_t* ptr = expectDim; + int32_t** dimensions = &ptr; + uint32_t dimensionCount = 2; + + OH_NN_ReturnCode ret = executorTest.GetOutputShape(m_index, dimensions, dimensionCount); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_get_output_dimensions_002 + * @tc.desc: Verify that the GetOutputShape function returns a failed message without run. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_get_output_dimensions_002, testing::ext::TestSize.Level0) +{ + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + + int32_t testDim[2] = {3, 3}; + int32_t* ptr = testDim; + int32_t** dimensions = &ptr; + uint32_t dimensionCount = 2; + + OH_NN_ReturnCode ret = executorTest.GetOutputShape(m_index, dimensions, dimensionCount); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: executor_get_output_dimensions_003 + * @tc.desc: Verify that the GetOutputShape function returns a failed message with out-of-range index. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_get_output_dimensions_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.Run()); + + uint32_t testIndex = 6; + int32_t testDim[2] = {3, 3}; + int32_t* ptr = testDim; + int32_t** dimensions = &ptr; + uint32_t dimensionCount = 2; + + OH_NN_ReturnCode ret = executorTest.GetOutputShape(testIndex, dimensions, dimensionCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_create_input_memory_001 + * @tc.desc: Verify that the CreateInputMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_input_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.CreateInputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_create_input_memory_002 + * @tc.desc: Verify that the CreateInputMemory function returns a failed message with out-of-range index. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_input_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + m_index = 6; + OH_NN_ReturnCode ret = executorTest.CreateInputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_create_input_memory_003 + * @tc.desc: Verify that the CreateInputMemory function returns a failed message with allocating buffer unsuccessfully. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_input_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.CreateInputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_destroy_input_memory_001 + * @tc.desc: Verify that the DestroyInputMemory function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_input_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)}; + OH_NN_Memory* ptr = &memoryPtr; + OH_NN_Memory** memory = &ptr; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateInputMemory(m_index, length, memory)); + OH_NN_ReturnCode ret = executorTest.DestroyInputMemory(m_index, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_destroy_input_memory_002 + * @tc.desc: Verify that the DestroyInputMemory function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_input_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + uint32_t testIndex = 6; + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateInputMemory(m_index, length, memory)); + OH_NN_ReturnCode ret = executorTest.DestroyInputMemory(testIndex, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_destroy_input_memory_003 + * @tc.desc: Verify that the DestroyInputMemory function returns a failed message without creating memory. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_input_memory_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.DestroyInputMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_destroy_input_memory_004 + * @tc.desc: Verify that the DestroyInputMemory function returns a failed message with invalid memory.data. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_input_memory_004, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateInputMemory(m_index, length, memory)); + + float arry[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; + void* const expectData = arry; + OH_NN_Memory mptr = {expectData, 9 * sizeof(float)}; + OH_NN_Memory* expectPtr = &mptr; + OH_NN_Memory** expectMemory = &expectPtr; + + OH_NN_ReturnCode ret = executorTest.DestroyInputMemory(m_index, expectMemory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_create_output_memory_001 + * @tc.desc: Verify that the CreateOutputMemory function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_output_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.CreateOutputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_create_output_memory_002 + * @tc.desc: Verify that the CreateOutputMemory function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_output_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.CreateOutputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_create_output_memory_003 + * @tc.desc: Verify that the CreateOutputMemory function returns a failed message with allocating buffer unsuccessfully. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_output_memory_003, testing::ext::TestSize.Level0) +{ + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.CreateOutputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_destroy_output_memory_001 + * @tc.desc: Verify that the DestroyOutputMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_output_memory_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateOutputMemory(m_index, length, memory)); + OH_NN_ReturnCode ret = executorTest.DestroyOutputMemory(m_index, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_destroy_output_memory_002 + * @tc.desc: Verify that the DestroyOutputMemory function returns a failed message with out-of-range index. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_output_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + uint32_t testIndex = 6; + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateOutputMemory(m_index, length, memory)); + OH_NN_ReturnCode ret = executorTest.DestroyOutputMemory(testIndex, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_destroy_output_memory_003 + * @tc.desc: Verify that the DestroyOutputMemory function returns a failed message without creating memory. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_output_memory_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.DestroyOutputMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_destroy_output_memory_004 + * @tc.desc: Verify that the DestroyOutputMemory function returns a failed message with invalid memory.data. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, executor_destroy_output_memory_004, testing::ext::TestSize.Level0)
+{
+    const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut);
+    InnerModel innerModel;
+    innerModel.BuildFromLiteGraph(liteGraphTest);
+    Compilation compilation(&innerModel);
+    Executor executorTest(&compilation);
+
+    size_t length = 9 * sizeof(float);
+    OH_NN_Memory** memory = nullptr;
+    SetMermory(memory);
+
+    EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateOutputMemory(m_index, length, memory));
+
+    float arry[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+    void* const expectData = arry;
+    OH_NN_Memory mptr = {expectData, 9 * sizeof(float)};
+    OH_NN_Memory* expectPtr = &mptr;
+    OH_NN_Memory** expectMemory = &expectPtr;
+
+    OH_NN_ReturnCode ret = executorTest.DestroyOutputMemory(m_index, expectMemory);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: executor_run_test_001
+ * @tc.desc: Verify that the Run function returns a successful message.
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, executor_run_test_001, testing::ext::TestSize.Level0)
+{
+    HiviewDFX::HiTraceId traceId = HiTraceChain::Begin("executor_run_test_001", HITRACE_FLAG_TP_INFO);
+    const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut);
+    InnerModel innerModel;
+    innerModel.BuildFromLiteGraph(liteGraphTest);
+    Compilation compilation(&innerModel);
+    Executor executorTest(&compilation);
+
+    size_t length = 9 * sizeof(float);
+    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
+    void* buffer = m_dataArry;
+
+    EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length));
+    EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length));
+    OH_NN_ReturnCode ret = executorTest.Run();
+    HiTraceChain::End(traceId);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/*
+ * @tc.name: executor_run_test_002
+ * @tc.desc: Verify that the Run function returns a failed message
without SetInput.
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, executor_run_test_002, testing::ext::TestSize.Level0)
+{
+    const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut);
+    InnerModel innerModel;
+    innerModel.BuildFromLiteGraph(liteGraphTest);
+    Compilation compilation(&innerModel);
+    Executor executorTest(&compilation);
+
+    OH_NN_ReturnCode ret = executorTest.Run();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: executor_run_test_003
+ * @tc.desc: Verify that the Run function returns a failed message without SetOutput.
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, executor_run_test_003, testing::ext::TestSize.Level0)
+{
+    const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut);
+    InnerModel innerModel;
+    innerModel.BuildFromLiteGraph(liteGraphTest);
+    Compilation compilation(&innerModel);
+    Executor executorTest(&compilation);
+
+    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
+    void* buffer = m_dataArry;
+    size_t length = 9 * sizeof(float);
+
+    EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length));
+    OH_NN_ReturnCode ret = executorTest.Run();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: executor_run_test_004
+ * @tc.desc: Verify that the Run function returns a failed message with failed executionPlan.Run.
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_run_test_004, testing::ext::TestSize.Level0) +{ + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + OH_NN_ReturnCode ret = executorTest.Run(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/components/v2_0/executor/executor_test.h b/test/unittest/components/v2_0/executor/executor_test.h new file mode 100644 index 0000000000000000000000000000000000000000..05837b5bfe3895c89b5651432fceffbb812192a1 --- /dev/null +++ b/test/unittest/components/v2_0/executor/executor_test.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_EXECUTOR_UNITTEST_H +#define NEURAL_NETWORK_RUNTIME_EXECUTOR_UNITTEST_H + +#include + +#include "mindir.h" + +#include "frameworks/native/executor.h" + +namespace MSLITE = mindspore::lite; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ExecutorTest : public testing::Test { +public: + MSLITE::LiteGraph* BuildLiteGraph(const std::vector dim, const std::vector dimOut); + OH_NN_Tensor SetTensor(OH_NN_DataType dataType, uint32_t dimensionCount, const int32_t *dimensions, + const OH_NN_QuantParam *quantParam, OH_NN_TensorType type); + void SetMermory(OH_NN_Memory** &memory); + +public: + uint32_t m_index {0}; + const std::vector m_dim {3, 3}; + const std::vector m_dimOut {3, 3}; + const int32_t m_dimArry[2] {3, 3}; + uint32_t m_dimensionCount {2}; + float m_dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; +}; +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_EXECUTOR_UNITTEST_H \ No newline at end of file diff --git a/test/unittest/components/v2_0/hdi_device/hdi_device_test.cpp b/test/unittest/components/v2_0/hdi_device/hdi_device_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2cfd496c6bf46989dac6e845a5e1b309b1ccd0c2 --- /dev/null +++ b/test/unittest/components/v2_0/hdi_device/hdi_device_test.cpp @@ -0,0 +1,875 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "frameworks/native/hdi_device_v2_0.h" +#include "test/unittest/common/v2_0/mock_idevice.h" +#include "test/unittest/common/file_utils.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace mindspore { +namespace lite { +OHOS::HDI::Nnrt::V2_0::Model* MindIR_LiteGraph_To_Model(const LiteGraph* lite_graph, + const OHOS::HDI::Nnrt::V2_0::SharedBuffer& buffer) +{ + return new (std::nothrow) OHOS::HDI::Nnrt::V2_0::Model(); +} + +void MindIR_Model_Destroy(OHOS::HDI::Nnrt::V2_0::Model** model) +{ + if ((model != nullptr) && (*model != nullptr)) { + delete *model; + *model = nullptr; + } +} + +size_t MindIR_LiteGraph_GetConstTensorSize(const mindspore::lite::LiteGraph* lite_graph) +{ + return 1; +} +} +} + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class HDIDeviceTest : public testing::Test { +protected: + void GetBuffer(void*& buffer, size_t length); + OH_NN_ReturnCode PrepareModel(int32_t allocBufferType, int32_t prepareType); +}; + +void HDIDeviceTest::GetBuffer(void*& buffer, size_t length) +{ + std::string data = "ABCD"; + const size_t dataLength = 100; + data.resize(dataLength, '+'); + + std::string filename = "/data/log/memory-001.dat"; + FileUtils fileUtils(filename); + fileUtils.WriteFile(data); + + int fd = open(filename.c_str(), O_RDWR); + EXPECT_NE(fd, -1); + + const auto &memoryManager = MemoryManager::GetInstance(); + buffer = memoryManager->MapMemory(fd, length); + EXPECT_NE(buffer, nullptr); + + const char* result = static_cast(buffer); + int index = 0; + EXPECT_EQ('A', result[index++]); + EXPECT_EQ('B', result[index++]); + EXPECT_EQ('C', result[index++]); + EXPECT_EQ('D', result[index++]); + close(fd); +} + +OH_NN_ReturnCode 
HDIDeviceTest::PrepareModel(int32_t allocBufferType, int32_t prepareType) +{ + std::shared_ptr model = std::make_shared(); + OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V2_0::MockIDevice()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr hdiDevice = std::make_unique(sp); + EXPECT_NE(hdiDevice, nullptr); + + V2_0::SharedBuffer buffer {1, 1, 0, 1}; + EXPECT_CALL(*sp, AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(allocBufferType))); + + std::shared_ptr preparedModel; + const int position = 2; + OHOS::sptr iPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_CALL(*sp, PrepareModel(::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee(iPreparedModel), + ::testing::Return(prepareType))); + + ModelConfig config; + OH_NN_ReturnCode result = hdiDevice->PrepareModel(model, config, preparedModel); + return result; +} + +/* * + * @tc.name: hdidevice_constructor_001 + * @tc.desc: Verify the Constructor function return object success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_constructor_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + EXPECT_NE(device, nullptr); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); +} + +/* * + * @tc.name: hdidevice_getdevicename_001 + * @tc.desc: Verify the GetDeviceName function validate device name success. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicename_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string deviceName = "MockDevice"; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_SUCCESS))); + + const std::string expectDeviceName = "MockDevice"; + std::string newDeviceName = ""; + OH_NN_ReturnCode result = hdiDevice->GetDeviceName(newDeviceName); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceName, newDeviceName); +} + +/* * + * @tc.name: hdidevice_getdevicename_002 + * @tc.desc: Verify the GetDeviceName function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicename_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string deviceName = "MockDevice"; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetDeviceName(deviceName); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_getvendorname_001 + * @tc.desc: Verify the GetVendorName function validate vendor name success. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string vendorName = "MockVendor"; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_SUCCESS))); + + const std::string expectDeviceName = "MockVendor"; + std::string newVendorName = ""; + OH_NN_ReturnCode result = hdiDevice->GetVendorName(newVendorName); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceName, newVendorName); +} + +/* * + * @tc.name: hdidevice_getvendorname_002 + * @tc.desc: Verify the GetVendorName function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string vendorName = "MockVendor"; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetVendorName(vendorName); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_getdevicetype_001 + * @tc.desc: Verify the GetDeviceType function validate device type success. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + V2_0::DeviceType iDeviceType = V2_0::DeviceType::CPU; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceType), ::testing::Return(HDF_SUCCESS))); + + OH_NN_DeviceType expectDeviceType = OH_NN_CPU; + OH_NN_DeviceType newDeviceType = OH_NN_CPU; + OH_NN_ReturnCode result = hdiDevice->GetDeviceType(newDeviceType); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceType, newDeviceType); +} + +/* * + * @tc.name: hdidevice_getdevicetype_002 + * @tc.desc: Verify the GetDeviceType function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + OH_NN_DeviceType deviceType = OH_NN_CPU; + V2_0::DeviceType iDeviceType = V2_0::DeviceType::CPU; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceType), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetDeviceType(deviceType); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_getdevicestatus_001 + * @tc.desc: Verify the GetDeviceStatus function validate device status success. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V2_0::DeviceStatus iDeviceStatus = V2_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceStatus), ::testing::Return(HDF_SUCCESS))); + + const DeviceStatus expectDeviceStatus = AVAILABLE; + DeviceStatus newDeviceStatus = AVAILABLE; + OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(newDeviceStatus); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceStatus, newDeviceStatus); +} + +/* * + * @tc.name: hdidevice_getdevicestatus_002 + * @tc.desc: Verify the GetDeviceStatus function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + DeviceStatus deviceStatus = AVAILABLE; + V2_0::DeviceStatus iDeviceStatus = V2_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceStatus), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(deviceStatus); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_getsupportedoperation_001 + * @tc.desc: Verify the GetSupportedOperation function return success. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_001, TestSize.Level0) +{ + std::vector ops {true}; + std::shared_ptr model = std::make_shared(); + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V2_0::SharedBuffer buffer {1, 1, 0, 1}; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_SUCCESS))); + + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetSupportedOperation(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(ops), ::testing::Return(HDF_SUCCESS))); + + std::vector newOps {true}; + const std::vector expectOps {true}; + OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, newOps); + EXPECT_EQ(OH_NN_SUCCESS, result); + auto expectOpsSize = expectOps.size(); + for (size_t i = 0; i < expectOpsSize; ++i) { + EXPECT_EQ(expectOps[i], newOps[i]); + } +} + +/* * + * @tc.name: hdidevice_getsupportedoperation_002 + * @tc.desc: Verify the GetSupportedOperation function return failed in case of allocate buffer failure. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_002, TestSize.Level0) +{ + std::vector ops; + std::shared_ptr model = std::make_shared(); + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V2_0::SharedBuffer buffer {1, 1, 0, 1}; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_FAILURE))); + + OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, ops); + EXPECT_EQ(OH_NN_FAILED, result); +} + +/* * + * @tc.name: hdidevice_getsupportedoperation_003 + * @tc.desc: Verify the GetSupportedOperation function return nullptr. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_003, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + std::shared_ptr model = nullptr; + std::vector ops; + OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, ops); + EXPECT_EQ(OH_NN_NULL_PTR, result); +} + +/* * + * @tc.name: hdidevice_getsupportedoperation_004 + * @tc.desc: Verify the GetSupportedOperation function return unavalidable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_004, TestSize.Level0) +{ + std::vector ops {true}; + std::shared_ptr model = std::make_shared(); + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V2_0::SharedBuffer buffer {2, 1, 0, 1}; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_SUCCESS))); + + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetSupportedOperation(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(ops), ::testing::Return(HDF_FAILURE))); + + std::vector newOps {true}; + OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, newOps); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_isfloat16precisionsupported_001 + * @tc.desc: Verify the IsFloat16PrecisionSupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isfloat16precisionsupported_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsFloat16PrecisionSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + OH_NN_ReturnCode result = hdiDevice->IsFloat16PrecisionSupported(isSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); +} + +/* * + * @tc.name: hdidevice_isfloat16precisionsupported_002 + * @tc.desc: Verify the IsFloat16PrecisionSupported function return unavailable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isfloat16precisionsupported_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsFloat16PrecisionSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsFloat16PrecisionSupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_isperformancemodesupported_001 + * @tc.desc: Verify the IsPerformanceModeSupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isperformancemodesupported_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsPerformanceModeSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + + bool newIsSupported = false; + const bool expectIsSupported = false; + OH_NN_ReturnCode result = hdiDevice->IsPerformanceModeSupported(newIsSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectIsSupported, newIsSupported); +} + +/* * + * @tc.name: hdidevice_isperformancemodesupported_002 + * @tc.desc: Verify the IsPerformanceModeSupported function return unavailable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isperformancemodesupported_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsPerformanceModeSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsPerformanceModeSupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_isprioritysupported_001 + * @tc.desc: Verify the IsPrioritySupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isprioritysupported_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsPrioritySupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + + bool newIsSupported = false; + bool expectIsSupported = false; + OH_NN_ReturnCode result = hdiDevice->IsPrioritySupported(newIsSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(newIsSupported, expectIsSupported); +} + +/* * + * @tc.name: hdidevice_isprioritysupported_002 + * @tc.desc: Verify the IsPrioritySupported function return unavailable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isprioritysupported_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsPrioritySupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsPrioritySupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_isdynamicinputsupported_001 + * @tc.desc: Verify the IsDynamicInputSupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isdynamicinputsupported_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsDynamicInputSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + + bool newIsSupported = false; + bool expectIsSupported = false; + OH_NN_ReturnCode result = hdiDevice->IsDynamicInputSupported(newIsSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(newIsSupported, expectIsSupported); +} + +/* * + * @tc.name: hdidevice_isdynamicinputsupported_002 + * @tc.desc: Verify the IsDynamicInputSupported function return unavailable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isdynamicinputsupported_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsDynamicInputSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsDynamicInputSupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_isdynamicinputsupported_001 + * @tc.desc: Verify the IsModelCacheSupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_ismodelcachesupported_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsModelCacheSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + + bool newIsSupported = false; + bool expectIsSupported = false; + OH_NN_ReturnCode result = hdiDevice->IsModelCacheSupported(newIsSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectIsSupported, newIsSupported); +} + +/* * + * @tc.name: hdidevice_isdynamicinputsupported_002 + * @tc.desc: Verify the IsModelCacheSupported function return unavailable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_ismodelcachesupported_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsModelCacheSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsModelCacheSupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_preparemodel_001 + * @tc.desc: Verify the PrepareModel function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_001, TestSize.Level0) +{ + int32_t allocBufferType = HDF_SUCCESS; + int32_t prepareType = HDF_SUCCESS; + OH_NN_ReturnCode result = PrepareModel(allocBufferType, prepareType); + EXPECT_EQ(OH_NN_SUCCESS, result); +} + +/* * + * @tc.name: hdidevice_preparemodel_002 + * @tc.desc: Verify the PrepareModel function return invalid parameter. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + std::shared_ptr model = nullptr; + ModelConfig config; + std::shared_ptr preparedModel; + OH_NN_ReturnCode result = hdiDevice->PrepareModel(model, config, preparedModel); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/* * + * @tc.name: hdidevice_preparemodel_003 + * @tc.desc: Verify the PrepareModel function return failed. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_003, TestSize.Level0) +{ + int32_t allocBufferType = HDF_SUCCESS; + int32_t prepareType = HDF_FAILURE; + OH_NN_ReturnCode result = PrepareModel(allocBufferType, prepareType); + EXPECT_EQ(OH_NN_FAILED, result); +} + +/* * + * @tc.name: hdidevice_preparemodel_004 + * @tc.desc: Verify the PrepareModel function return failed. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_004, TestSize.Level0) +{ + int32_t allocBufferType = HDF_FAILURE; + int32_t prepareType = HDF_FAILURE; + OH_NN_ReturnCode result = PrepareModel(allocBufferType, prepareType); + EXPECT_EQ(OH_NN_FAILED, result); +} + +/* * + * @tc.name: hdidevice_preparemodelfrommodelcache_001 + * @tc.desc: Verify the PrepareModelFromModelCache function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_001, TestSize.Level0) +{ + size_t length = 100; + void *buffer = nullptr; + GetBuffer(buffer, length); + + std::vector modelCache = { { buffer, 100 } }; + ModelConfig config; + + OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V2_0::MockIDevice()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr hdiDevice = std::make_unique(sp); + EXPECT_NE(hdiDevice, nullptr); + + std::shared_ptr preparedModel; + + OHOS::sptr iPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_SUCCESS))); + + OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel); + const auto &memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); + EXPECT_EQ(OH_NN_SUCCESS, result); +} + +/* * + * @tc.name: hdidevice_preparemodelfrommodelcache_002 + * @tc.desc: Verify the PrepareModelFromModelCache function return unavailable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_002, TestSize.Level0) +{ + size_t length = 100; + void *buffer = nullptr; + GetBuffer(buffer, length); + + OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V2_0::MockIDevice()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr hdiDevice = std::make_unique(sp); + EXPECT_NE(hdiDevice, nullptr); + + std::vector modelCache = { { buffer, 100 } }; + ModelConfig config; + OHOS::sptr preModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(preModel, nullptr); + + std::shared_ptr preparedModel = std::make_shared(preModel); + + OHOS::sptr iPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel); + EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_FAILURE))); + + OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_preparemodelfrommodelcache_003 + * @tc.desc: Verify the PrepareModelFromModelCache function return nullptr. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_003, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + std::vector modelCache = { { nullptr, 0 } }; + ModelConfig config; + std::shared_ptr preparedModel; + OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel); + EXPECT_EQ(OH_NN_NULL_PTR, result); +} + +/* * + * @tc.name: hdidevice_allocatebuffer_001 + * @tc.desc: Verify the AllocateBuffer function return nullptr. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V2_0::SharedBuffer buffer; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_FAILURE))); + + size_t length = 8; + void *result = hdiDevice->AllocateBuffer(length); + EXPECT_EQ(nullptr, result); + hdiDevice->ReleaseBuffer(result); +} + +/* * + * @tc.name: hdidevice_allocatebuffer_002 + * @tc.desc: Verify the AllocateBuffer function return nullptr and HDF_FAILURE. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + size_t length = 8; + void *result = hdiDevice->AllocateBuffer(length); + EXPECT_EQ(nullptr, result); + hdiDevice->ReleaseBuffer(result); +} + +/* * + * @tc.name: hdidevice_allocatebuffer_003 + * @tc.desc: Verify the AllocateBuffer function return nullptr in case of 0 size. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_003, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + size_t length = 0; + void *result = hdiDevice->AllocateBuffer(length); + EXPECT_EQ(nullptr, result); +} + +/* * + * @tc.name: hdidevice_releasebuffer_001 + * @tc.desc: Verify the ReleaseBuffer function validate buffer success. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_001, TestSize.Level0) +{ + size_t length = 100; + void *buffer = nullptr; + GetBuffer(buffer, length); + + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_)) + .WillRepeatedly(::testing::Return(HDF_SUCCESS)); + + EXPECT_NE(hdiDevice, nullptr); + hdiDevice->ReleaseBuffer(buffer); + const auto &memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} + +/* * + * @tc.name: hdidevice_releasebuffer_002 + * @tc.desc: Verify the ReleaseBuffer function validate AllocateBuffer return nullptr. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V2_0::SharedBuffer sharedbuffer; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(sharedbuffer), ::testing::Return(HDF_FAILURE))); + + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_)) + .WillRepeatedly(::testing::Return(HDF_FAILURE)); + + size_t length = 8; + void *buffer = hdiDevice->AllocateBuffer(length); + hdiDevice->ReleaseBuffer(buffer); +} + +/* * + * @tc.name: hdidevice_releasebuffer_003 + * @tc.desc: Verify the ReleaseBuffer function validate param buffer is nullptr. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_003, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + void *buffer = nullptr; + hdiDevice->ReleaseBuffer(buffer); +} + +/* * + * @tc.name: hdidevice_releasebuffer_004 + * @tc.desc: Verify the ReleaseBuffer function validate invalid buffer. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_004, TestSize.Level0) +{ + const size_t length = 100; + auto* buffer = new(std::nothrow) char[length]; + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + hdiDevice->ReleaseBuffer(buffer); + delete[] buffer; + buffer = nullptr; +} + +/* * + * @tc.name: hdidevice_releasebuffer_005 + * @tc.desc: Verify the ReleaseBuffer function validate moc object's ReleaseBuffer return failure. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_005, TestSize.Level0) +{ + size_t length = 100; + void *buffer = nullptr; + GetBuffer(buffer, length); + + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_)) + .WillRepeatedly(::testing::Return(HDF_FAILURE)); + + hdiDevice->ReleaseBuffer(buffer); + const auto &memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/v2_0/hdi_prepared_model/hdi_prepared_model_test.cpp b/test/unittest/components/v2_0/hdi_prepared_model/hdi_prepared_model_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5e4d2d8fcbc38252f0de46bc4c76fa08639439c7 --- /dev/null +++ 
b/test/unittest/components/v2_0/hdi_prepared_model/hdi_prepared_model_test.cpp @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +#include +#include + +#include "common/log.h" +#include "frameworks/native/hdi_prepared_model_v2_0.h" +#include "frameworks/native/memory_manager.h" +#include "frameworks/native/transform.h" +#include "test/unittest/common/v2_0/mock_idevice.h" +#include "test/unittest/common/file_utils.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class HDIPreparedModelTest : public testing::Test { +protected: + void GetBuffer(void*& buffer, size_t length); + void InitTensor(std::vector& inputs, void* buffer, size_t length); + OH_NN_ReturnCode Run(std::vector& inputs); +}; + +void HDIPreparedModelTest::GetBuffer(void*& buffer, size_t length) +{ + std::string data = "ABCD"; + const size_t dataLength = 100; + data.resize(dataLength, '-'); + + std::string filename = "/data/log/memory-001.dat"; + FileUtils fileUtils(filename); + fileUtils.WriteFile(data); + + int fd = open(filename.c_str(), O_RDWR); + EXPECT_NE(-1, fd); + + const auto& memoryManager = MemoryManager::GetInstance(); + buffer = memoryManager->MapMemory(fd, length); + close(fd); +} + +void HDIPreparedModelTest::InitTensor(std::vector& inputs, void* 
buffer, size_t length) +{ + IOTensor inputTensor; + inputTensor.dataType = OH_NN_INT8; + inputTensor.dataType = OH_NN_INT8; + inputTensor.format = OH_NN_FORMAT_NCHW; + inputTensor.data = buffer; + inputTensor.length = length; + inputs.emplace_back(std::move(inputTensor)); +} + +OH_NN_ReturnCode HDIPreparedModelTest::Run(std::vector& inputs) +{ + const int vvPosition = 2; + const int vPosition = 3; + std::vector outputs; + std::vector> outputsDims {{0}}; + std::vector isOutputBufferEnough {}; + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + EXPECT_CALL(*sp, Run(::testing::_, ::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll( + ::testing::SetArgReferee(outputsDims), + ::testing::SetArgReferee(isOutputBufferEnough), + ::testing::Return(HDF_SUCCESS)) + ); + + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + return result; +} + +/** + * @tc.name: hidpreparedmodel_constructor_001 + * @tc.desc: Verify the Constructor function validate constructor success. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_constructor_001, TestSize.Level0) +{ + OHOS::sptr hdiPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(hdiPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + EXPECT_NE(preparedModel, nullptr); +} + +/** + * @tc.name: hidpreparedmodel_exportmodelcache_001 + * @tc.desc: Verify the ExportModelCache function return memory error. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_001, TestSize.Level0) +{ + std::vector bufferVect = {{100, 100, 0, 100}}; + OHOS::sptr hdiPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + std::vector modelCache; + EXPECT_CALL(*((V2_0::MockIPreparedModel*)hdiPreparedModel.GetRefPtr()), + ExportModelCache(::testing::_)) + .WillRepeatedly( + ::testing::DoAll( + ::testing::SetArgReferee<0>(bufferVect), + ::testing::Return(HDF_SUCCESS) + ) + ); + + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_MEMORY_ERROR, result); +} + +/** + * @tc.name: hidpreparedmodel_exportmodelcache_002 + * @tc.desc: Verify the ExportModelCache function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_002, TestSize.Level0) +{ + std::vector bufferVect; + OHOS::sptr mockPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(mockPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); + std::vector modelCache; + EXPECT_CALL(*((V2_0::MockIPreparedModel*)mockPreparedModel.GetRefPtr()), + ExportModelCache(::testing::_)) + .WillRepeatedly( + ::testing::DoAll( + ::testing::SetArgReferee<0>(bufferVect), + ::testing::Return(HDF_SUCCESS) + ) + ); + + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_SUCCESS, result); +} + +/** + * @tc.name: hidpreparedmodel_exportmodelcache_003 + * @tc.desc: Verify the ExportModelCache function return invalid parameter. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_003, TestSize.Level0) +{ + OHOS::sptr hdiPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(hdiPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + std::vector modelCache {{nullptr, 0}}; + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_exportmodelcache_004 + * @tc.desc: Verify the ExportModelCache function return unvailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_004, TestSize.Level0) +{ + std::vector bufferVect = {{100, 100, 0, 100}}; + OHOS::sptr mockPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(mockPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); + std::vector modelCache; + EXPECT_CALL(*((V2_0::MockIPreparedModel*)mockPreparedModel.GetRefPtr()), + ExportModelCache(::testing::_)) + .WillRepeatedly( + ::testing::DoAll( + ::testing::SetArgReferee<0>(bufferVect), + ::testing::Return(HDF_FAILURE) + ) + ); + + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/** + * @tc.name: hidpreparedmodel_run_001 + * @tc.desc: Verify the Run function return invalid parameter. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_001, TestSize.Level0) +{ + IOTensor inputTensor; + inputTensor.dataType = OH_NN_INT8; + + IOTensor outputTensor; + outputTensor.dataType = OH_NN_INT8; + std::vector inputs; + inputs.emplace_back(std::move(inputTensor)); + std::vector outputs; + + std::vector iOutputTensors; + V2_0::IOTensor iTensor; + iOutputTensors.emplace_back(iTensor); + std::vector> outputsDims {{0}}; + std::vector isOutputBufferEnough {}; + + std::shared_ptr sp = std::make_shared(); + OHOS::sptr hdiPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(hdiPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_002 + * @tc.desc: Verify the Run function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_002, TestSize.Level0) +{ + const size_t length = 100; + void* buffer = nullptr; + GetBuffer(buffer, length); + + std::vector inputs; + std::vector outputs; + InitTensor(inputs, buffer, length); + + OH_NN_ReturnCode result = Run(inputs); + EXPECT_EQ(OH_NN_SUCCESS, result); + const auto& memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} + +/** + * @tc.name: hidpreparedmodel_run_003 + * @tc.desc: Verify the Run function return unavailable device in case of run failure. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_003, TestSize.Level0) +{ + const size_t length = 100; + void* buffer = nullptr; + GetBuffer(buffer, length); + + std::vector inputs; + std::vector outputs; + InitTensor(inputs, buffer, length); + + std::vector> outputsDims {}; + std::vector isOutputBufferEnough {}; + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + + EXPECT_CALL(*sp, Run(::testing::_, ::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly( + ::testing::DoAll( + ::testing::SetArgReferee<2>(outputsDims), + ::testing::SetArgReferee<3>(isOutputBufferEnough), + ::testing::Return(HDF_FAILURE) + ) + ); + + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); + const auto& memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} + +/** + * @tc.name: hidpreparedmodel_run_004 + * @tc.desc: Verify the Run function return invalid parameter. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_004, TestSize.Level0) +{ + std::vector inputs; + InitTensor(inputs, nullptr, 0); + OH_NN_ReturnCode result = Run(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_005 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_005, TestSize.Level0) +{ + const size_t length = 100; + void* buffer = nullptr; + GetBuffer(buffer, length); + + std::vector inputs; + std::vector outputs; + InitTensor(inputs, buffer, length); + InitTensor(outputs, nullptr, 0); + + std::vector> outputsDims {}; + std::vector isOutputBufferEnough {}; + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); + const auto& memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/v2_0/inner_model/inner_model_test.cpp b/test/unittest/components/v2_0/inner_model/inner_model_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e40c0422576273d205f66e960a48f00d4f11c3f7 --- /dev/null +++ b/test/unittest/components/v2_0/inner_model/inner_model_test.cpp @@ -0,0 +1,825 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include "common/utils.h" +#include "common/log.h" +#include "frameworks/native/nn_tensor.h" +#include "frameworks/native/inner_model.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; + +namespace NNRT { +namespace UnitTest { +class InnerModelTest : public testing::Test { +public: + void SetLiteGraph(mindspore::lite::LiteGraph* liteGraph); + void SetTensors(); + void SetIndices(); + +public: + InnerModel m_innerModelTest; + + std::vector m_dimInput{3, 3}; + std::vector m_dimOutput{3, 3}; + std::vector m_inputIndices{0}; + std::vector m_outputIndices{1}; + + OH_NN_OperationType m_opType{OH_NN_OPS_ADD}; + + OH_NN_UInt32Array m_inputs; + OH_NN_UInt32Array m_outputs; + OH_NN_UInt32Array m_params; + + uint32_t m_paramIndexs[1]{3}; + uint32_t m_inputIndexs[2]{0, 1}; + uint32_t m_outputIndexs[1]{2}; +}; + +void InnerModelTest::SetLiteGraph(mindspore::lite::LiteGraph* liteGraph) +{ + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_ = m_inputIndices; + liteGraph->output_indices_ = m_outputIndices; + + const std::vector quant_params {}; + + for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) { + const std::vector data(36, 1); + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, m_dimInput, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) { + const std::vector dataOut(36, 1); + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, m_dimOutput, mindspore::lite::FORMAT_NCHW, dataOut, quant_params)); + } +} + +void InnerModelTest::SetTensors() +{ + const int dim[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_SUCCESS, 
m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + + const OH_NN_Tensor& tensorParam = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_ADD_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensorParam)); +} + +void InnerModelTest::SetIndices() +{ + m_params.data = m_paramIndexs; + m_params.size = sizeof(m_paramIndexs) / sizeof(uint32_t); + + m_inputs.data = m_inputIndexs; + m_inputs.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + + m_outputs.data = m_outputIndexs; + m_outputs.size = sizeof(m_outputIndexs) / sizeof(uint32_t); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_001 + * @tc.desc: Verify the input_indices is empty of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_001, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_inputIndices = {}; + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_002 + * @tc.desc: Verify the input_indices is out of bounds of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_002, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_inputIndices = {6}; + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_003 + * @tc.desc: Verify the 
success of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_003, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_004 + * @tc.desc: Verify the nntensor build failed nullptr return of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_004, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_dimInput = {3, -3}; + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_NULL_PTR, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_005 + * @tc.desc: Verify the output indices out of bounds of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_005, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_outputIndices = {6}; + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + +/** + * @tc.name: inner_model_build_from_lite_graph_001 + * @tc.desc: Verify the litegraph is nullptr of the build_from_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_from_lite_graph_001, TestSize.Level1) +{ + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.BuildFromLiteGraph(nullptr)); +} + +/** + * 
@tc.name: inner_model_build_from_lite_graph_002 + * @tc.desc: Verify the buildfromlitegraph twice forbidden of the build_from_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_from_lite_graph_002, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.BuildFromLiteGraph(liteGraph)); +} + +/** + * @tc.name: inner_model_build_from_lite_graph_003 + * @tc.desc: Verify the litegraph->alltensors is empty of the build_from_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_from_lite_graph_003, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_ = {0}; + liteGraph->output_indices_ = {1}; + + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + + +/** + * @tc.name: inner_model_add_tensor_001 + * @tc.desc: Verify the success of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_tensor_001, TestSize.Level1) +{ + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); +} + +/** + * @tc.name: inner_model_add_tensor_002 + * @tc.desc: Verify the addtensor after buildfromlitegraph of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, 
inner_model_add_tensor_002, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.AddTensor(tensor)); +} + +/** + * @tc.name: inner_model_add_tensor_003 + * @tc.desc: Verify the buildfromnntensor failed of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_tensor_003, TestSize.Level1) +{ + const int32_t dimInput[2] = {2, -2}; + const OH_NN_Tensor& tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddTensor(tensor)); +} + + +/** + * @tc.name: inner_model_set_tensor_value_001 + * @tc.desc: Verify the success of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_001, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); +} + +/** + * @tc.name: inner_model_set_tensor_value_002 + * @tc.desc: Verify the index out of bounds of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_002, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 6; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); +} + +/** + * @tc.name: inner_model_set_tensor_value_003 + * @tc.desc: Verify the buffer value is nullptr of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_003, TestSize.Level1) +{ + SetTensors(); + + 
uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + nullptr, sizeof(activation))); +} + +/** + * @tc.name: inner_model_set_tensor_value_004 + * @tc.desc: Verify the length invalid of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_004, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), 0)); +} + +/** + * @tc.name: inner_model_set_tensor_value_005 + * @tc.desc: Verify the after buildgraph of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_005, TestSize.Level1) +{ + uint32_t index = 3; + const int8_t activation = 0; + + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); +} + +/** + * @tc.name: inner_model_set_tensor_value_006 + * @tc.desc: Verify the set value twice of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_006, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); +} + +/** + * @tc.name: inner_model_set_tensor_value_007 + * @tc.desc: Verify the tensor dynamicShape of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_007, TestSize.Level1) +{ + const 
int32_t dimInput[2] = {2, -1}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dimInput, nullptr, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + const OH_NN_Tensor& tensorParam = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_ADD_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensorParam)); + + uint32_t index = 0; + float x[4] = {0, 1, 2, 3}; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.SetTensorValue(index, + x, sizeof(x)- 1)); +} + +/** + * @tc.name: inner_model_add_operation_001 + * @tc.desc: Verify the success of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_001, TestSize.Level1) +{ + SetIndices(); + + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_002 + * @tc.desc: Verify the after buildgraph of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_002, TestSize.Level1) +{ + OH_NN_OperationType m_opType = OH_NN_OPS_ADD; + OH_NN_UInt32Array m_inputs; + OH_NN_UInt32Array m_outputs; + OH_NN_UInt32Array m_params; + + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, + m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_003 + * @tc.desc: Verify the without set buffer of the addtensor function + * @tc.type: FUNC + 
*/ +HWTEST_F(InnerModelTest, inner_model_add_operation_003, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_004 + * @tc.desc: Verify the output indices equal to input indices of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_004, TestSize.Level1) +{ + m_outputIndexs[0] = 0; + + SetIndices(); + SetTensors(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_005 + * @tc.desc: Verify the optype invalid of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_005, TestSize.Level1) +{ + m_opType = OH_NN_OperationType(99); + + SetIndices(); + SetTensors(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_006 + * @tc.desc: Verify the input indices out of bounds of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_006, TestSize.Level1) +{ + m_inputIndexs[1] = 6; + + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_007 + * @tc.desc: Verify the param indices out of bounds of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_007, TestSize.Level1) +{ + m_paramIndexs[0] = 6; + + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, 
m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_008 + * @tc.desc: Verify the input indices size is 0 of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_008, TestSize.Level1) +{ + SetIndices(); + + m_inputs.size = 0; + m_inputs.data = nullptr; + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_009 + * @tc.desc: Verify the output indices size is 0 of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_009, TestSize.Level1) +{ + SetIndices(); + + m_outputs.size = 0; + m_outputs.data = nullptr; + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_010 + * @tc.desc: Verify the ops build failed of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_010, TestSize.Level1) +{ + SetIndices(); + + const int32_t dimInput1[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dimInput1, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + const int32_t dimInput2[2] = {2, 2}; + const OH_NN_Tensor& tensor1 = {OH_NN_FLOAT32, 2, dimInput2, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, 
m_innerModelTest.AddTensor(tensor1)); + const int32_t dimOutput[2] = {2, 2}; + const OH_NN_Tensor& tensor2 = {OH_NN_FLOAT32, 2, dimOutput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor2)); + const OH_NN_Tensor& tensor3 = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_DIV_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor3)); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_specify_inputs_and_outputs_001 + * @tc.desc: Verify the success of the specify_inputs_and_outputs function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_specify_inputs_and_outputs_001, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + + std::vector> inTensors = m_innerModelTest.GetInputTensors(); + EXPECT_EQ(inTensors.size(), m_inputs.size); + std::vector> outTensors = m_innerModelTest.GetOutputTensors(); + EXPECT_EQ(outTensors.size(), m_outputs.size); +} + +/** + * @tc.name: inner_model_specify_inputs_and_outputs_002 + * @tc.desc: Verify the after buildgraph of the specify_inputs_and_outputs function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_specify_inputs_and_outputs_002, TestSize.Level1) +{ + OH_NN_UInt32Array inputs; + OH_NN_UInt32Array outputs; + inputs.data = m_inputIndexs; + inputs.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + outputs.data = nullptr; + outputs.size = sizeof(m_outputIndexs) / sizeof(uint32_t); + + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + 
+ EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.SpecifyInputsAndOutputs(inputs, outputs)); +} + +/** + * @tc.name: inner_model_specify_inputs_and_outputs_003 + * @tc.desc: Verify the output indices is nullptr but length not 0 of the specify_inputs_and_outputs function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_specify_inputs_and_outputs_003, TestSize.Level1) +{ + SetIndices(); + + m_outputs.data = nullptr; + SetTensors(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_specify_inputs_and_outputs_004 + * @tc.desc: Verify the specift twice of the specify_inputs_and_outputs function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_specify_inputs_and_outputs_004, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_001, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); + EXPECT_EQ(true, m_innerModelTest.IsBuild()); +} + +/** + * @tc.name: inner_model_build_002 + * @tc.desc: Verify the build twice forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_002, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + uint32_t index = 3; 
+ const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast<const void*>(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.Build()); +} + +/** + * @tc.name: inner_model_build_003 + * @tc.desc: Verify the params not match optype of the build function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_003, TestSize.Level1) +{ + OH_NN_OperationType m_opType = OH_NN_OPS_DIV; + + SetIndices(); + + const int dim[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + const OH_NN_Tensor& tensorParam = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_DIV_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensorParam)); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast<const void*>(&activation), sizeof(int8_t))); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_FAILED, m_innerModelTest.Build()); +} + +/** + * @tc.name: inner_model_build_004 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_004, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast<const void*>(&activation), 
sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); +} + +/** + * @tc.name: inner_model_get_supported_operation_001 + * @tc.desc: Verify the success of the get_supported_operation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_get_supported_operation_001, TestSize.Level1) +{ + const bool *isSupported = nullptr; + uint32_t opCount = 1; + + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + size_t deviceID = 10; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.GetSupportedOperations(deviceID, &isSupported, opCount)); +} + +/** + * @tc.name: inner_model_get_supported_operation_002 + * @tc.desc: Verify the mock hdi device result of the get_supported_operation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_get_supported_operation_002, TestSize.Level1) +{ + size_t deviceID = 10; + const bool *isSupported = nullptr; + uint32_t opCount = 1; + + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, m_innerModelTest.GetSupportedOperations(deviceID, &isSupported, opCount)); +} + +/** + * @tc.name: inner_model_get_supported_operation_003 + * @tc.desc: Verify the mock device manager of the get_supported_operation function + * 
@tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_get_supported_operation_003, TestSize.Level1) +{ + const bool *isSupported = nullptr; + uint32_t opCount = 1; + + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + size_t deviceID = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); + EXPECT_EQ(OH_NN_FAILED, m_innerModelTest.GetSupportedOperations(deviceID, &isSupported, opCount)); + + std::shared_ptr liteGraph = m_innerModelTest.GetLiteGraphs(); + EXPECT_EQ(liteGraph->name_, "NNR_Model"); +} + +/** + * @tc.name: inner_model_get_supported_operation_004 + * @tc.desc: Verify the before build of the get_supported_operation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_get_supported_operation_004, TestSize.Level1) +{ + size_t deviceID = 10; + const bool *isSupported = nullptr; + uint32_t opCount = 1; + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.GetSupportedOperations(deviceID, &isSupported, opCount)); +} +} // namespace UnitTest +} // namespace NNRT diff --git a/test/unittest/components/v2_0/inner_model/nn_tensor_test.cpp b/test/unittest/components/v2_0/inner_model/nn_tensor_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a288c26902bbd8c384f263a39f1bdfae9228e466 --- /dev/null +++ b/test/unittest/components/v2_0/inner_model/nn_tensor_test.cpp @@ -0,0 +1,525 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <gtest/gtest.h> +#include <gmock/gmock.h> + +#include "frameworks/native/validation.h" +#include "frameworks/native/nn_tensor.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Validation; + +namespace NNRT { +namespace UnitTest { +class NnTensorTest : public testing::Test { +}; + +/** + * @tc.name: nn_tensor_parse_dimensions_001 + * @tc.desc: Verify the success of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_dimensions_002 + * @tc.desc: Verify the invalid dimensions of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_002, TestSize.Level1) +{ + OH_NN_Tensor tensor; + tensor.dataType = OH_NN_FLOAT32; + tensor.dimensionCount = 2; + tensor.dimensions = nullptr; + tensor.quantParam = nullptr; + tensor.type = OH_NN_TENSOR; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_dimensions_003 + * @tc.desc: Verify the invalid shape tensor of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_003, TestSize.Level1) +{ + const int dim[2] = {2, -2}; + OH_NN_Tensor 
tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_dimensions_004 + * @tc.desc: Verify the dynamic shape of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_004, TestSize.Level1) +{ + const int dim[2] = {2, -1}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_dimensions_005 + * @tc.desc: Verify the dims out of bounds of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_005, TestSize.Level1) +{ + const int dim[3] = {1000000, 1000000, 10000000}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + + +/** + * @tc.name: nn_tensor_parse_quant_params_001 + * @tc.desc: Verify the success of the parse_quant_params function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_001, TestSize.Level1) +{ + const double scale = 1.0; + const int32_t zeroPoint = 0; + const uint32_t numBits = 8; + const OH_NN_QuantParam quantParam = {1, &numBits, &scale, &zeroPoint}; + + NNTensor nnTensor; + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_quant_params_002 + * @tc.desc: Verify the invalid numbits of the parse_quant_params function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_002, TestSize.Level1) +{ + const double scale = 1.0; + const int32_t zeroPoint = 0; + const uint32_t numBits = 16; + const OH_NN_QuantParam quantParam = {1, &numBits, &scale, 
&zeroPoint}; + + NNTensor nnTensor; + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_quant_params_004 + * @tc.desc: Verify the invalid scale of the parse_quant_params function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_004, TestSize.Level1) +{ + const int32_t zeroPoint = 0; + const uint32_t numBits = 8; + const OH_NN_QuantParam quantParam = {1, &numBits, nullptr, &zeroPoint}; + + NNTensor nnTensor; + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_quant_params_005 + * @tc.desc: Verify the invalid zeropoint of the parse_quant_params function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_005, TestSize.Level1) +{ + const double scale = 1.0; + const uint32_t numBits = 8; + const OH_NN_QuantParam quantParam = {1, &numBits, &scale, nullptr}; + + NNTensor nnTensor; + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_set_dimensions_001 + * @tc.desc: Verify the success of the set_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_set_dimensions_001, TestSize.Level1) +{ + const int dim[2] = {2, -1}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + const std::vector dimensions = {2, 3}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.SetDimensions(dimensions)); +} + +/** + * @tc.name: nn_tensor_set_dimensions_002 + * @tc.desc: Verify the dim out of bounds of the set_dimensions function + * 
@tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_set_dimensions_002, TestSize.Level1) +{ + const int dim[2] = {2, -1}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + const std::vector dimensions = {2, 3, 5}; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.SetDimensions(dimensions)); +} + +/** + * @tc.name: nn_tensor_compare_attribute_001 + * @tc.desc: Verify the success of the CompareAttribute function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + NNTensor expectTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + expectTensor = std::move(nnTensor); + EXPECT_EQ(true, nnTensor.CompareAttribute(nnTensor)); +} + +/** + * @tc.name: nn_tensor_compare_attribute_002 + * @tc.desc: Verify the datatype not equal of the CompareAttribute function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_002, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + NNTensor expectTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + const int dimExpect[2] = {2, 2}; + OH_NN_Tensor tensorExpect{OH_NN_INT32, 2, dimExpect, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, expectTensor.BuildFromOHNNTensor(tensorExpect)); + + EXPECT_EQ(false, nnTensor.CompareAttribute(expectTensor)); +} + +/** + * @tc.name: nn_tensor_compare_attribute_003 + * @tc.desc: Verify the dim size not equal of the CompareAttribute function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_003, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + NNTensor 
expectTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + const int dimExpect[3] = {2, 2, 3}; + OH_NN_Tensor tensorExpect{OH_NN_FLOAT32, 3, dimExpect, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, expectTensor.BuildFromOHNNTensor(tensorExpect)); + + EXPECT_EQ(false, nnTensor.CompareAttribute(expectTensor)); +} + +/** + * @tc.name: nn_tensor_compare_attribute_004 + * @tc.desc: Verify the dim value not equal of the CompareAttribute function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_004, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + NNTensor expectTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + const int dimExpect[2] = {2, 3}; + OH_NN_Tensor tensorExpect{OH_NN_FLOAT32, 2, dimExpect, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, expectTensor.BuildFromOHNNTensor(tensorExpect)); + + EXPECT_EQ(false, nnTensor.CompareAttribute(expectTensor)); +} + +/** + * @tc.name: nn_tensor_is_scalar_001 + * @tc.desc: Verify the success of the is_scalar function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_is_scalar_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + EXPECT_EQ(false, nnTensor.IsScalar()); +} + +/** + * @tc.name: nn_tensor_build_from_tensor_001 + * @tc.desc: Verify the success of the build_from_tensor function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_convert_to_io_tensor_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + int8_t* activationValue = new (std::nothrow) int8_t[1]{0}; + EXPECT_NE(nullptr, activationValue); + + // 
After SetBuffer, this memory is released by NNTensor + nnTensor.SetBuffer(activationValue, sizeof(int8_t)); + IOTensor ioTensor; + nnTensor.ConvertToIOTensor(ioTensor); + EXPECT_EQ(sizeof(int8_t), ioTensor.length); +} + +/** + * @tc.name: nn_tensor_get_buffer_length_001 + * @tc.desc: Verify the success of the get_buffer_length function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_get_buffer_length_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + int8_t* activationValue = new (std::nothrow) int8_t[1]{0}; + EXPECT_NE(nullptr, activationValue); + + // After SetBuffer, this memory is released by NNTensor + nnTensor.SetBuffer(activationValue, sizeof(int8_t)); + size_t length = sizeof(int8_t); + EXPECT_EQ(length, nnTensor.GetBufferLength()); +} + +/** + * @tc.name: nn_tensor_get_format_001 + * @tc.desc: Verify the success of the get_format function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_get_format_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + OH_NN_Format format = OH_NN_FORMAT_NHWC; + EXPECT_EQ(format, nnTensor.GetFormat()); +} + +/** + * @tc.name: nn_tensor_get_name_001 + * @tc.desc: Verify the success of the get name function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_get_name_001, TestSize.Level1) +{ + NNTensor nnTensor; + const std::string& name = "test"; + nnTensor.SetName(name); + EXPECT_EQ(name, nnTensor.GetName()); +} + +/** + * @tc.name: nn_tensor_get_quant_param_001 + * @tc.desc: Verify the success of the get_quant_param function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_get_quant_param_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor 
tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + std::vector quantParam = nnTensor.GetQuantParam(); + size_t quantSize = 0; + EXPECT_EQ(quantSize, quantParam.size()); +} + +/** + * @tc.name: nn_tensor_build_from_tensor_002 + * @tc.desc: Verify the invalid datatype value of the build_from_tensor function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_from_tensor_002, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + + int dataTypeTest = 13; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + OH_NN_Tensor tensor{dataType, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_convert_to_lite_graph_tensor_001 + * @tc.desc: Verify the success of the convert_to_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_convert_to_lite_graph_tensor_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + LiteGraphTensorPtr tensorPtr = {nullptr, DestroyLiteGraphTensor}; + EXPECT_NE(tensorPtr, nnTensor.ConvertToLiteGraphTensor()); +} + +/** + * @tc.name: nn_tensor_convert_to_lite_graph_tensor_002 + * @tc.desc: Verify the success with quant of the convert_to_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_convert_to_lite_graph_tensor_002, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + + OH_NN_Tensor tensor; + tensor.dataType = OH_NN_FLOAT32; + tensor.dimensionCount = 2; + tensor.dimensions = dim; + const double scale = 1.0; + const int32_t zeroPoint = 0; + const uint32_t numBits = 8; + const OH_NN_QuantParam quantParam = {1, &numBits, &scale, &zeroPoint}; + tensor.quantParam = &quantParam; + tensor.type = OH_NN_TENSOR; + + NNTensor 
nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + LiteGraphTensorPtr tensorPtr = {nullptr, DestroyLiteGraphTensor}; + EXPECT_NE(tensorPtr, nnTensor.ConvertToLiteGraphTensor()); +} + +/** + * @tc.name: nn_tensor_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_001, TestSize.Level1) +{ + OH_NN_DataType dataType = OH_NN_FLOAT32; + const std::vector dimensions = {2, 2}; + const std::vector quantParam = {{8, 1.0, 0}, {8, 1.0, 0}, {8, 1.0, 0}}; + OH_NN_TensorType type = OH_NN_ADD_ACTIVATIONTYPE; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.Build(dataType, dimensions, quantParam, type)); +} + +/** + * @tc.name: nn_tensor_build_002 + * @tc.desc: Verify the invalid datatype value of the build function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_002, TestSize.Level1) +{ + int dataTypeTest = 13; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + const std::vector dimensions = {2, 2}; + const std::vector quantParam = {{8, 1.0, 0}, {8, 1.0, 0}, {8, 1.0, 0}}; + OH_NN_TensorType type = OH_NN_ADD_ACTIVATIONTYPE; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.Build(dataType, dimensions, quantParam, type)); +} + +/** + * @tc.name: nn_tensor_build_003 + * @tc.desc: Verify the dynamic shape of the build function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_003, TestSize.Level1) +{ + OH_NN_DataType dataType = OH_NN_FLOAT32; + const std::vector dimensions = {2, -2}; + const std::vector quantParam = {{8, 1.0, 0}, {8, 1.0, 0}, {8, 1.0, 0}}; + OH_NN_TensorType type = OH_NN_ADD_ACTIVATIONTYPE; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.Build(dataType, dimensions, quantParam, type)); +} + +/** + * @tc.name: nn_tensor_build_004 + * @tc.desc: Verify the invalid numbits of the build function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_004, TestSize.Level1) +{ 
+ OH_NN_DataType dataType = OH_NN_FLOAT32; + const std::vector<int32_t> dimensions = {2, 2}; + const std::vector<QuantParam> quantParam = {{2, 1.0, 0}, {2, 1.0, 0}, {2, 1.0, 0}}; + OH_NN_TensorType type = OH_NN_ADD_ACTIVATIONTYPE; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.Build(dataType, dimensions, quantParam, type)); +} +} // namespace UnitTest +} // namespace NNRT diff --git a/test/unittest/components/v2_0/inner_model/nn_validation_test.cpp b/test/unittest/components/v2_0/inner_model/nn_validation_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..49a2e81e465aa0582e6df75634466b02522bfbd2 --- /dev/null +++ b/test/unittest/components/v2_0/inner_model/nn_validation_test.cpp @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <gtest/gtest.h> + +#include "frameworks/native/validation.h" +#include "frameworks/native/nn_tensor.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Validation; + +namespace NNRT { +namespace UnitTest { +class NnValidationTest : public testing::Test { +}; + +/** + * @tc.name: nn_validation_validate_tensor_datatype_001 + * @tc.desc: Verify the success of the validate_tensor_datatype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_tensor_datatype_001, TestSize.Level1) +{ + int dataTypeTest = 12; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + EXPECT_EQ(true, ValidateTensorDataType(dataType)); +} + +/** + * @tc.name: nn_validation_validate_tensor_datatype_002 + * @tc.desc: Verify the gt bounds of the validate_tensor_datatype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_tensor_datatype_002, TestSize.Level1) +{ + int dataTypeTest = 13; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + EXPECT_EQ(false, ValidateTensorDataType(dataType)); +} + +/** + * @tc.name: nn_validation_validate_tensor_datatype_003 + * @tc.desc: Verify the lt bounds of the validate_tensor_datatype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_tensor_datatype_003, TestSize.Level1) +{ + int dataTypeTest = -1; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + EXPECT_EQ(false, ValidateTensorDataType(dataType)); +} + +/** + * @tc.name: nn_validation_validate_preformance_mode_001 + * @tc.desc: Verify the success of the validate_preformance_mode function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_preformance_mode_001, TestSize.Level1) +{ + int performanceModeTest = 4; + OH_NN_PerformanceMode performanceMode = (OH_NN_PerformanceMode)performanceModeTest; + EXPECT_EQ(true, ValidatePerformanceMode(performanceMode)); +} + +/** + * 
@tc.name: nn_validation_validate_preformance_mode_002 + * @tc.desc: Verify the gt bounds of the validate_preformance_mode function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_preformance_mode_002, TestSize.Level1) +{ + int performanceModeTest = 5; + OH_NN_PerformanceMode performanceMode = (OH_NN_PerformanceMode)performanceModeTest; + EXPECT_EQ(false, ValidatePerformanceMode(performanceMode)); +} + +/** + * @tc.name: nn_validation_validate_preformance_mode_003 + * @tc.desc: Verify the lt bounds of the validate_preformance_mode function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_preformance_mode_003, TestSize.Level1) +{ + int performanceModeTest = -1; + OH_NN_PerformanceMode performanceMode = (OH_NN_PerformanceMode)performanceModeTest; + EXPECT_EQ(false, ValidatePerformanceMode(performanceMode)); +} + +/** + * @tc.name: nn_validation_validate_priority_001 + * @tc.desc: Verify the success of the validate_priority function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_priority_001, TestSize.Level1) +{ + int priorityTest = 2; + OH_NN_Priority priority = (OH_NN_Priority)priorityTest; + EXPECT_EQ(true, ValidatePriority(priority)); +} + +/** + * @tc.name: nn_validation_validate_priority_002 + * @tc.desc: Verify the gt bounds of the validate_priority function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_priority_002, TestSize.Level1) +{ + int priorityTest = 4; + OH_NN_Priority priority = (OH_NN_Priority)priorityTest; + EXPECT_EQ(false, ValidatePriority(priority)); +} + +/** + * @tc.name: nn_validation_validate_priority_003 + * @tc.desc: Verify the lt bounds of the validate_priority function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_priority_003, TestSize.Level1) +{ + int priorityTest = -1; + OH_NN_Priority priority = (OH_NN_Priority)priorityTest; + EXPECT_EQ(false, ValidatePriority(priority)); +} + +/** + * @tc.name: 
nn_validation_fusetype_001 + * @tc.desc: Verify the success of the validate_fusetype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_fusetype_001, TestSize.Level1) +{ + int fuseTypeTest = 2; + OH_NN_FuseType fuseType = (OH_NN_FuseType)fuseTypeTest; + EXPECT_EQ(true, ValidateFuseType(fuseType)); +} + +/** + * @tc.name: nn_validation_fusetype_002 + * @tc.desc: Verify the gt bounds of the validate_fusetype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_fusetype_002, TestSize.Level1) +{ + int fuseTypeTest = 3; + OH_NN_FuseType fuseType = (OH_NN_FuseType)fuseTypeTest; + EXPECT_EQ(false, ValidateFuseType(fuseType)); +} + +/** + * @tc.name: nn_validation_fusetype_003 + * @tc.desc: Verify the lt bounds of the validate_fusetype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_fusetype_003, TestSize.Level1) +{ + int fuseTypeTest = -1; + OH_NN_FuseType fuseType = (OH_NN_FuseType)fuseTypeTest; + EXPECT_EQ(false, ValidateFuseType(fuseType)); +} +} // namespace UnitTest +} // namespace NNRT diff --git a/test/unittest/components/v2_0/inner_model/ops_regitstry_test.cpp b/test/unittest/components/v2_0/inner_model/ops_regitstry_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..de3cc84846f07e6cdc33067e5b09754de8a5e998 --- /dev/null +++ b/test/unittest/components/v2_0/inner_model/ops_regitstry_test.cpp @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <gtest/gtest.h> + +#include "frameworks/native/validation.h" +#include "frameworks/native/ops_registry.h" +#include "frameworks/native/ops/add_builder.h" +#include "frameworks/native/ops/div_builder.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Validation; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace NNRT { +namespace UnitTest { +class OpsRegistryTest : public testing::Test { +}; + +/** + * @tc.name: registry_001 + * @tc.desc: Verify the registry success the registar function + * @tc.type: FUNC + */ +HWTEST_F(OpsRegistryTest, registry_001, TestSize.Level1) +{ + const int newRegistryOperationType = 100; + REGISTER_OPS(AddBuilder, OH_NN_OperationType(newRegistryOperationType)); + + OpsRegistry& opsregistry = OpsRegistry::GetSingleton(); + EXPECT_NE(nullptr, opsregistry.GetOpsBuilder(OH_NN_OperationType(newRegistryOperationType))); +} + +/** + * @tc.name: registry_002 + * @tc.desc: Verify the registry twice the registar function + * @tc.type: FUNC + */ +HWTEST_F(OpsRegistryTest, registry_002, TestSize.Level1) +{ + const int newRegistryOperationType = 1000; + REGISTER_OPS(AddBuilder, OH_NN_OperationType(newRegistryOperationType)); + + OpsRegistry& opsregistry = OpsRegistry::GetSingleton(); + EXPECT_NE(nullptr, opsregistry.GetOpsBuilder(OH_NN_OperationType(newRegistryOperationType))); + + REGISTER_OPS(DivBuilder, OH_NN_OperationType(newRegistryOperationType)); +} +} // namespace UnitTest +} // namespace NNRT diff --git a/test/unittest/components/v2_0/neural_network_runtime_test/neural_network_runtime_test.cpp b/test/unittest/components/v2_0/neural_network_runtime_test/neural_network_runtime_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8f0c04260deed1f8c2d34579b2b8c1ae1894308b --- /dev/null +++ 
b/test/unittest/components/v2_0/neural_network_runtime_test/neural_network_runtime_test.cpp @@ -0,0 +1,2221 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "neural_network_runtime_test.h" + +#include "mindir.h" + +#include "common/utils.h" +#include "frameworks/native/compilation.h" +#include "frameworks/native/device_manager.h" +#include "frameworks/native/hdi_device_v2_0.h" +#include "test/unittest/common/v2_0/mock_idevice.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +OH_NN_ReturnCode HDIDeviceV2_0::PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + if (model == nullptr) { + return OH_NN_INVALID_PARAMETER; + } + + if (config.enableFloat16 == false) { + return OH_NN_FAILED; + } + + sptr iPreparedModel = sptr(new OHOS::HDI::Nnrt::V2_0::MockIPreparedModel()); + if (iPreparedModel == nullptr) { + LOGE("HDIDeviceV2_0 mock PrepareModel failed, error happened when new sptr"); + return OH_NN_NULL_PTR; + } + + preparedModel = CreateSharedPtr(iPreparedModel); + return OH_NN_SUCCESS; +} + +std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const +{ + sptr idevice + = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::MockIDevice()); + if (idevice == nullptr) { + LOGE("DeviceManager mock GetDevice failed, error happened when new sptr"); + return nullptr; + } + + std::shared_ptr device = CreateSharedPtr(idevice); + if (device == 
nullptr) { + LOGE("DeviceManager mock GetDevice failed, the device is nullptr"); + return nullptr; + } + + if (deviceId == 0) { + LOGE("DeviceManager mock GetDevice failed, the passed parameter deviceId is 0"); + return nullptr; + } else { + return device; + } +} + +// Mock: reads the value the caller passed IN through the reference and reports +// OH_NN_UNAVALIDABLE_DEVICE only when it is OH_NN_OTHERS; the out-param is never written here. +OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceType(OH_NN_DeviceType& deviceType) +{ + if (deviceType == OH_NN_OTHERS) { + return OH_NN_UNAVALIDABLE_DEVICE; + } + + return OH_NN_SUCCESS; +} + +// Mock: deviceId 0 yields the empty name; any other id yields the fixed string "deviceId". +const std::string& DeviceManager::GetDeviceName(size_t deviceId) +{ + static std::string deviceName = ""; + if (deviceId == 0) { + return deviceName; + } + + deviceName = "deviceId"; + return deviceName; +} + +// Mock: MockIPreparedModel::m_ExpectRetCode is a test-controlled sentinel — when a test sets it to +// OH_NN_FAILED the list is returned empty and the sentinel is reset to OH_NN_OPERATION_FORBIDDEN. +// NOTE(review): deviceIds is static and gets one element appended on every non-failing call, so the +// list grows across calls — presumably intentional for these tests, TODO confirm. +const std::vector& DeviceManager::GetAllDeviceId() +{ + static std::vector deviceIds; + if (OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + // In order not to affect other use cases, set to the OH_NN_OPERATION_FORBIDDEN + OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return deviceIds; + } + std::size_t device = 1; + deviceIds.emplace_back(device); + return deviceIds; +} + +// Mocks: all capability probes below unconditionally report "supported". +OH_NN_ReturnCode HDIDeviceV2_0::IsModelCacheSupported(bool& isSupported) +{ + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsPerformanceModeSupported(bool& isSupported) +{ + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsPrioritySupported(bool& isSupported) +{ + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsFloat16PrecisionSupported(bool& isSupported) +{ + isSupported = true; + return OH_NN_SUCCESS; +} + +// Mock: rejects a null model, otherwise reports every queried op as supported (single `true`). +OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptr model, + std::vector& ops) +{ + if (model == nullptr) { + LOGE("HDIDeviceV2_0 mock GetSupportedOperation failed, Model is nullptr, cannot query supported operation."); + return OH_NN_NULL_PTR; + } + + ops.emplace_back(true); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode 
HDIDeviceV2_0::IsDynamicInputSupported(bool& isSupported) +{ + isSupported = true; + return OH_NN_SUCCESS; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Unittest { +// Fixture helper: builds a minimal LiteGraph (one input index, one output index, 3x3 float32 +// tensors) and feeds it to innerModel.BuildFromLiteGraph; the graph's ownership transfers to the model. +OH_NN_ReturnCode NeuralNetworkRuntimeTest::BuildModelGraph(InnerModel& innerModel) +{ + // liteGraph is released internally by innerModel + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph; + EXPECT_NE(nullptr, liteGraph); + + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_ = {0}; + liteGraph->output_indices_ = {1}; + liteGraph->all_tensors_ = {nullptr}; + const std::vector data(36, 1); + const std::vector dim = {3, 3}; + const std::vector quant_params {}; + + for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) { + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) { + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + return innerModel.BuildFromLiteGraph(liteGraph); +} + +// Fixture helper: points the fixture's OH_NN_UInt32Array members (data/size) at the m_*Indexs buffers. +void NeuralNetworkRuntimeTest::InitIndices() +{ + m_inputIndices.data = m_inputIndexs; + m_inputIndices.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + + m_outputIndices.data = m_outputIndexs; + m_outputIndices.size = sizeof(m_outputIndexs) / sizeof(uint32_t); + + m_paramIndices.data = m_paramIndexs; + m_paramIndices.size = sizeof(m_paramIndexs) / sizeof(uint32_t); +} + +// Fixture helper: registers 2x2 OH_NN_FLOAT32 tensors on the model (continued below with an +// OH_NN_ADD_ACTIVATIONTYPE int8 scalar parameter tensor). +void NeuralNetworkRuntimeTest::AddModelTensor(InnerModel& innerModel) +{ + const int dim[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_SUCCESS, 
innerModel.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddTensor(tensor)); + + const OH_NN_Tensor& tensorParam = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_ADD_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddTensor(tensorParam)); +} + +// Fixture helper: resets m_tensor to a dimensionless OH_NN_INT32 descriptor with no quantization. +void NeuralNetworkRuntimeTest::SetTensor() +{ + m_tensor.dataType = OH_NN_INT32; + m_tensor.dimensionCount = 0; + m_tensor.dimensions = nullptr; + m_tensor.quantParam = nullptr; + m_tensor.type = OH_NN_TENSOR; +} + +// Fixture helper: completes the model — writes the activation scalar at tensor index 3, adds one +// OH_NN_OPS_ADD operation using the fixture index arrays, specifies inputs/outputs, then builds. +void NeuralNetworkRuntimeTest::SetInnerBuild(InnerModel& innerModel) +{ + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddOperation(opType, m_paramIndices, m_inputIndices, m_outputIndices)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SpecifyInputsAndOutputs(m_inputIndices, m_outputIndices)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.Build()); +} + +// Fixture helper: drives a full SetInput/SetOutput/Run cycle on the executor using one 9-element buffer. +// NOTE(review): length is computed as 9 * sizeof(int32_t) while the buffer holds float — same byte +// count on this target, but TODO confirm sizeof(float) was intended. +void NeuralNetworkRuntimeTest::SetInputAndOutput(Executor& executor) +{ + size_t length = 9 * sizeof(int32_t); + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void *buffer = input; + uint32_t index = 0; + + SetTensor(); + EXPECT_EQ(OH_NN_SUCCESS, executor.SetInput(index, m_tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executor.SetOutput(index, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executor.Run()); +} + +/* + * @tc.name: model_construct_001 + * @tc.desc: Verify the return model of the OH_NNModel_Construct function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_construct_001, testing::ext::TestSize.Level0) +{ + OH_NNModel* ret = OH_NNModel_Construct(); + EXPECT_NE(nullptr, ret); +} + +/* + * @tc.name: model_add_tensor_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_Tensor function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_tensor_001, testing::ext::TestSize.Level0) +{ + OH_NNModel* model = nullptr; + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, &tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_tensor_002 + * @tc.desc: Verify the OH_NN_Tensor is nullptr of the OH_NNModel_AddTensor function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_tensor_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_Tensor* tensor = nullptr; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_tensor_003 + * @tc.desc: Verify the success of the OH_NNModel_AddTensor function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_tensor_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, &tensor); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_add_operation_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_AddOperation function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + InitIndices(); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, &m_paramIndices, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_operation_002 + * @tc.desc: Verify the paramIndices is nullptr of the OH_NNModel_AddOperation function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + m_inputIndices.data = m_inputIndexs; + m_inputIndices.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + + m_outputIndices.data = m_outputIndexs; + m_outputIndices.size = sizeof(m_outputIndexs) / sizeof(uint32_t); + + AddModelTensor(innerModel); + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, nullptr, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_operation_003 + * @tc.desc: Verify the inputIndices is nullptr of the OH_NNModel_AddOperation function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + m_paramIndices.data = m_paramIndexs; + m_paramIndices.size = sizeof(m_paramIndexs) / sizeof(uint32_t); + + m_outputIndices.data = m_outputIndexs; + m_outputIndices.size = sizeof(m_outputIndexs) / sizeof(uint32_t); + + AddModelTensor(innerModel); + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, &m_paramIndices, nullptr, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_operation_004 + * @tc.desc: Verify the outputIndices is nullptr of the OH_NNModel_AddOperation function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + m_paramIndices.data = m_paramIndexs; + m_paramIndices.size = sizeof(m_paramIndexs) / sizeof(uint32_t); + + m_inputIndices.data = m_inputIndexs; + m_inputIndices.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + + AddModelTensor(innerModel); + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, &m_paramIndices, &m_inputIndices, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_operation_005 + * @tc.desc: Verify the success of the OH_NNModel_AddOperation function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + InitIndices(); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, &m_paramIndices, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_set_tensor_data_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_SetTensorData function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_set_tensor_data_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + + OH_NN_ReturnCode ret = OH_NNModel_SetTensorData(model, index, static_cast(&activation), + sizeof(int8_t)); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_set_tensor_data_002 + * @tc.desc: Verify the data is nullptr of the OH_NNModel_SetTensorData function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_set_tensor_data_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + AddModelTensor(innerModel); + + uint32_t index = 3; + + OH_NN_ReturnCode ret = OH_NNModel_SetTensorData(model, index, nullptr, sizeof(int8_t)); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_set_tensor_data_003 + * @tc.desc: Verify the length is 0 of the OH_NNModel_SetTensorData function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_set_tensor_data_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + + OH_NN_ReturnCode ret = OH_NNModel_SetTensorData(model, index, static_cast(&activation), 0); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_set_tensor_data_004 + * @tc.desc: Verify the successs of the OH_NNModel_SetTensorData function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_set_tensor_data_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + + OH_NN_ReturnCode ret = OH_NNModel_SetTensorData(model, index, static_cast(&activation), + sizeof(int8_t)); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_specify_inputs_and_outputs_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_SpecifyInputsAndOutputs function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_specify_inputs_and_outputs_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + + InitIndices(); + AddModelTensor(innerModel); + + OH_NN_ReturnCode ret = OH_NNModel_SpecifyInputsAndOutputs(model, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_specify_inputs_and_outputs_002 + * @tc.desc: Verify the inputIndices is nullptr of the OH_NNModel_SpecifyInputsAndOutputs function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_specify_inputs_and_outputs_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + InitIndices(); + AddModelTensor(innerModel); + + OH_NN_ReturnCode ret = OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_specify_inputs_and_outputs_003 + * @tc.desc: Verify the outputIndices is nullptr of the OH_NNModel_SpecifyInputsAndOutputs function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_specify_inputs_and_outputs_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + InitIndices(); + AddModelTensor(innerModel); + + OH_NN_ReturnCode ret = OH_NNModel_SpecifyInputsAndOutputs(model, &m_inputIndices, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_specify_inputs_and_outputs_004 + * @tc.desc: Verify the success of the OH_NNModel_SpecifyInputsAndOutputs function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_specify_inputs_and_outputs_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + InitIndices(); + AddModelTensor(innerModel); + + OH_NN_ReturnCode ret = OH_NNModel_SpecifyInputsAndOutputs(model, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_finish_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_Finish function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_finish_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + InitIndices(); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, static_cast(&activation), + sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddOperation(opType, m_paramIndices, m_inputIndices, m_outputIndices)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SpecifyInputsAndOutputs(m_inputIndices, m_outputIndices)); + + OH_NN_ReturnCode ret = OH_NNModel_Finish(model); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_finish_002 + * @tc.desc: Verify the success of the OH_NNModel_Finish function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_finish_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + InitIndices(); + AddModelTensor(innerModel); + + const int8_t activation = 0; + uint32_t index = 3; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddOperation(opType, m_paramIndices, m_inputIndices, m_outputIndices)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SpecifyInputsAndOutputs(m_inputIndices, m_outputIndices)); + + OH_NN_ReturnCode ret = OH_NNModel_Finish(model); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_destroy_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_Destroy function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_destroy_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel** pModel = nullptr; + OH_NNModel_Destroy(pModel); + EXPECT_EQ(nullptr, pModel); +} + +/* + * @tc.name: model_destroy_002 + * @tc.desc: Verify the *OH_NNModel is nullptr of the OH_NNModel_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_destroy_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + OH_NNModel** pModel = &model; + OH_NNModel_Destroy(pModel); + EXPECT_EQ(nullptr, model); +} + +/* + * @tc.name: model_destroy_003 + * @tc.desc: Verify the normal model of the OH_NNModel_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_destroy_003, testing::ext::TestSize.Level0) +{ + InnerModel* innerModel = new InnerModel(); + EXPECT_NE(nullptr, innerModel); + OH_NNModel* model = reinterpret_cast(innerModel); + OH_NNModel_Destroy(&model); + EXPECT_EQ(nullptr, model); +} + +/* + * @tc.name: model_get_available_operation_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_GetAvailableOperations function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + + uint32_t opCount = 1; + const bool *pIsAvailable = nullptr; + + InitIndices(); + AddModelTensor(innerModel); + SetInnerBuild(innerModel); + + size_t deviceID = 10; + OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, &pIsAvailable, &opCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_get_available_operation_002 + * @tc.desc: Verify the isAvailable is nullptr of the OH_NNModel_GetAvailableOperations function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + uint32_t opCount = 1; + InitIndices(); + AddModelTensor(innerModel); + SetInnerBuild(innerModel); + + size_t deviceID = 10; + OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, nullptr, &opCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_get_available_operation_003 + * @tc.desc: Verify the *isAvailable is no nullptr of the OH_NNModel_GetAvailableOperations function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + const bool isAvailable = true; + const bool *pIsAvailable = &isAvailable; + uint32_t opCount = 1; + + InitIndices(); + AddModelTensor(innerModel); + SetInnerBuild(innerModel); + + size_t deviceID = 10; + OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, &pIsAvailable, &opCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_get_available_operation_004 + * @tc.desc: Verify the opCount is nullptr of the OH_NNModel_GetAvailableOperations function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + const bool *pIsAvailable = nullptr; + uint32_t* opCount = nullptr; + + InitIndices(); + AddModelTensor(innerModel); + SetInnerBuild(innerModel); + + size_t deviceID = 10; + OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, &pIsAvailable, opCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_get_available_operation_005 + * @tc.desc: Verify the success of the OH_NNModel_GetAvailableOperations function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + const bool *pIsAvailable = nullptr; + uint32_t opCount = 1; + + InitIndices(); + AddModelTensor(innerModel); + SetInnerBuild(innerModel); + + size_t deviceID = 10; + OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, &pIsAvailable, &opCount); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_construct_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNCompilation_Construct function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_construct_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + const OH_NNModel* model = nullptr; + OH_NNCompilation* ret = OH_NNCompilation_Construct(model); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: compilation_construct_002 + * @tc.desc: Verify the not OH_NNModel_Build before creating compilation of the OH_NNCompilation_Construct function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_construct_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NNCompilation* ret = OH_NNCompilation_Construct(model); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: compilation_construct_003 + * @tc.desc: Verify the normal model of the OH_NNCompilation_Construct function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_construct_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NNCompilation* ret = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, ret); +} + +/* + * @tc.name: compilation_set_device_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_device_001, testing::ext::TestSize.Level0) +{ + OH_NNCompilation* compilation = nullptr; + size_t deviceId = 1; + OH_NN_ReturnCode ret = OH_NNCompilation_SetDevice(compilation, deviceId); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_device_002 + * @tc.desc: Verify the success of the OH_NNCompilation_SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_device_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + size_t deviceId = 1; + OH_NN_ReturnCode ret = OH_NNCompilation_SetDevice(nnCompilation, deviceId); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_cache_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_SetCache function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_cache_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + const char* cacheDir = "../"; + uint32_t version = 1; + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + OH_NN_ReturnCode ret = OH_NNCompilation_SetCache(nnCompilation, cacheDir, version); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_cache_002 + * @tc.desc: Verify the cachePath is nullptr of the OH_NNCompilation_SetCache function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_cache_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + const char* cacheDir = nullptr; + uint32_t version = 1; + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + OH_NN_ReturnCode ret = OH_NNCompilation_SetCache(nnCompilation, cacheDir, version); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_cache_003 + * @tc.desc: Verify the success of the OH_NNCompilation_SetCache function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_cache_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + const char* cacheDir = "../"; + uint32_t version = 1; + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + OH_NN_ReturnCode ret = OH_NNCompilation_SetCache(nnCompilation, cacheDir, version); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_performance_mode_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_SetPerformanceMode function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_performance_mode_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + OH_NN_PerformanceMode performanceMode = OH_NN_PERFORMANCE_NONE; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + OH_NN_ReturnCode ret = OH_NNCompilation_SetPerformanceMode(nnCompilation, performanceMode); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_performance_mode_002 + * @tc.desc: Verify the success of the OH_NNCompilation_SetPerformanceMode function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_performance_mode_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + OH_NN_PerformanceMode performanceMode = OH_NN_PERFORMANCE_NONE; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_SetPerformanceMode(nnCompilation, performanceMode); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_priority_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_priority_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + OH_NN_Priority priority = OH_NN_PRIORITY_LOW; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_SetPriority(nnCompilation, priority); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_priority_002 + * @tc.desc: Verify the success of the OH_NNCompilation_SetPriority function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_priority_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + OH_NN_Priority priority = OH_NN_PRIORITY_LOW; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_SetPriority(nnCompilation, priority); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_enable_float16_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_EnableFloat16 function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_enable_float16_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + bool enableFloat16 = true; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_EnableFloat16(nnCompilation, enableFloat16); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_enable_float16_002 + * @tc.desc: Verify the success of the OH_NNCompilation_EnableFloat16 function. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_enable_float16_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation compilation(&innerModel);
+    OH_NNCompilation* nnCompilation = reinterpret_cast<OH_NNCompilation*>(&compilation);
+    bool enableFloat16 = true;
+
+    std::size_t deviceId = 1;
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId));
+
+    OH_NN_ReturnCode ret = OH_NNCompilation_EnableFloat16(nnCompilation, enableFloat16);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/*
+ * @tc.name: compilation_build_001
+ * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_Build function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, compilation_build_001, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation compilation(&innerModel);
+    OH_NNCompilation* nnCompilation = nullptr;
+
+    std::size_t deviceId = 1;
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId));
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPerformance(OH_NN_PERFORMANCE_EXTREME));
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPriority(OH_NN_PRIORITY_HIGH));
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetEnableFp16(true));
+
+    OH_NN_ReturnCode ret = OH_NNCompilation_Build(nnCompilation);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: compilation_build_002
+ * @tc.desc: Verify the success of the OH_NNCompilation_Build function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, compilation_build_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation compilation(&innerModel);
+    OH_NNCompilation* nnCompilation = reinterpret_cast<OH_NNCompilation*>(&compilation);
+
+    std::size_t deviceId = 1;
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId));
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPerformance(OH_NN_PERFORMANCE_EXTREME));
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPriority(OH_NN_PRIORITY_HIGH));
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetEnableFp16(true));
+
+    OH_NN_ReturnCode ret = OH_NNCompilation_Build(nnCompilation);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/*
+ * @tc.name: compilation_destroy_001
+ * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_Destroy function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, compilation_destroy_001, testing::ext::TestSize.Level0)
+{
+    OH_NNCompilation** pCompilation = nullptr;
+    OH_NNCompilation_Destroy(pCompilation);
+    EXPECT_EQ(nullptr, pCompilation);
+}
+
+/*
+ * @tc.name: compilation_destroy_002
+ * @tc.desc: Verify the *OH_NNCompilation is nullptr of the OH_NNCompilation_Destroy function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, compilation_destroy_002, testing::ext::TestSize.Level0)
+{
+    OH_NNCompilation* compilation = nullptr;
+    OH_NNCompilation** pCompilation = &compilation;
+    OH_NNCompilation_Destroy(pCompilation);
+    EXPECT_EQ(nullptr, compilation);
+}
+
+/*
+ * @tc.name: compilation_destroy_003
+ * @tc.desc: Verify the normal model of the OH_NNCompilation_Destroy function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, compilation_destroy_003, testing::ext::TestSize.Level0)
+{
+    InnerModel* innerModel = new InnerModel();
+    EXPECT_NE(nullptr, innerModel);
+    Compilation* compilation = new(std::nothrow) Compilation(innerModel);
+    EXPECT_NE(nullptr, compilation);
+    OH_NNCompilation* nnCompilation = reinterpret_cast<OH_NNCompilation*>(compilation);
+    OH_NNCompilation_Destroy(&nnCompilation);
+    EXPECT_EQ(nullptr, nnCompilation);
+}
+
+/**
+ * @tc.name: excutor_construct_001
+ * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNExecutor_Construct function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_construct_001, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation compilation(&innerModel);
+
+    std::size_t deviceId = 1;
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId));
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetEnableFp16(true));
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPerformance(OH_NN_PERFORMANCE_EXTREME));
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPriority(OH_NN_PRIORITY_HIGH));
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.Build());
+
+    OH_NNCompilation* nnCompilation = nullptr;
+    OH_NNExecutor* executor = OH_NNExecutor_Construct(nnCompilation);
+    EXPECT_EQ(nullptr, executor);
+}
+
+/**
+ * @tc.name: excutor_construct_002
+ * @tc.desc: Verify the not OH_NNCompilation_Build before creating executor of the OH_NNExecutor_Construct function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_construct_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation compilation(&innerModel);
+    OH_NNCompilation* nnCompilation = reinterpret_cast<OH_NNCompilation*>(&compilation);
+    OH_NNExecutor * executor = OH_NNExecutor_Construct(nnCompilation);
+    EXPECT_EQ(nullptr, executor);
+}
+
+/**
+ * @tc.name: excutor_construct_003
+ * @tc.desc: Verify the success of the OH_NNExecutor_Construct function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_construct_003, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation compilation(&innerModel);
+
+    std::size_t deviceId = 1;
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId));
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPerformance(OH_NN_PERFORMANCE_EXTREME));
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPriority(OH_NN_PRIORITY_HIGH));
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetEnableFp16(true));
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.Build());
+
+    OH_NNCompilation* nnCompilation = reinterpret_cast<OH_NNCompilation*>(&compilation);
+    OH_NNExecutor * executor = OH_NNExecutor_Construct(nnCompilation);
+    EXPECT_NE(nullptr, executor);
+}
+
+/**
+ * @tc.name: excutor_setinput_001
+ * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_SetInput function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_001, testing::ext::TestSize.Level0)
+{
+    SetTensor();
+
+    float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    const void *buffer = input;
+    size_t length = 2 * sizeof(float);
+    uint32_t inputIndex = 0;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInput(nullptr, inputIndex, &m_tensor, buffer, length));
+}
+
+/**
+ * @tc.name: excutor_setinput_002
+ * @tc.desc: Verify the OH_NN_Tensor is nullptr of the OH_NNExecutor_SetInput function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t inputIndex = 0;
+    float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    const void *buffer = input;
+    size_t length = 2 * sizeof(float);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER,
+        OH_NNExecutor_SetInput(nnExecutor, inputIndex, nullptr, buffer, length));
+}
+
+/**
+ * @tc.name: excutor_setinput_003
+ * @tc.desc: Verify the data is nullptr of the OH_NNExecutor_SetInput function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_003, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    SetTensor();
+
+    uint32_t inputIndex = 0;
+    const void *buffer = nullptr;
+    size_t length = 2 * sizeof(float);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInput(nnExecutor, inputIndex, &m_tensor, buffer, length));
+}
+
+/**
+ * @tc.name: excutor_setinput_004
+ * @tc.desc: Verify the length is 0 of the OH_NNExecutor_SetInput function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_004, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t inputIndex = 0;
+    SetTensor();
+
+    size_t length = 0;
+    float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    const void *buffer = input;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInput(nnExecutor, inputIndex, &m_tensor, buffer, length));
+}
+
+/**
+ * @tc.name: excutor_setinput_005
+ * @tc.desc: Verify the success of the OH_NNExecutor_SetInput function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_005, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t inputIndex = 0;
+    SetTensor();
+
+    float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    const void *buffer = input;
+    size_t length = 9 * sizeof(int32_t);
+    OH_NN_ReturnCode ret = OH_NNExecutor_SetInput(nnExecutor, inputIndex, &m_tensor, buffer, length);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/**
+ * @tc.name: excutor_setoutput_001
+ * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_SetOutput function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_001, testing::ext::TestSize.Level0)
+{
+    uint32_t outputIndex = 0;
+    float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void *buffer = input;
+    size_t length = 9 * sizeof(int32_t);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(nullptr, outputIndex, buffer, length));
+}
+
+/**
+ * @tc.name: excutor_setoutput_002
+ * @tc.desc: Verify the data is nullptr of the OH_NNExecutor_SetOutput function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t outputIndex = 0;
+    void *buffer = nullptr;
+    size_t length = 9 * sizeof(int32_t);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(nnExecutor, outputIndex, buffer, length));
+}
+
+/**
+ * @tc.name: excutor_setoutput_003
+ * @tc.desc: Verify the length is 0 of the OH_NNExecutor_SetOutput function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_003, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t outputIndex = 0;
+    float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void *buffer = input;
+    size_t length = 0;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(nnExecutor, outputIndex, buffer, length));
+}
+
+/**
+ * @tc.name: excutor_setoutput_004
+ * @tc.desc: Verify the success of the OH_NNExecutor_SetOutput function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_004, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t outputIndex = 0;
+    float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void *buffer = input;
+    size_t length = 9 * sizeof(int32_t);
+    EXPECT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetOutput(nnExecutor, outputIndex, buffer, length));
+}
+
+/**
+ * @tc.name: excutor_getoutputshape_001
+ * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_GetOutputShape function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_001, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = nullptr;
+
+    SetInputAndOutput(executor);
+
+    int32_t* ptr = nullptr;
+    int32_t** shape = &ptr;
+    uint32_t length = 2;
+    uint32_t outputIndex = 0;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex,
+        shape, &length));
+}
+
+/**
+ * @tc.name: excutor_getoutputshape_002
+ * @tc.desc: Verify the shape is nullptr of the OH_NNExecutor_GetOutputShape function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    SetInputAndOutput(executor);
+
+    uint32_t outputIndex = 0;
+    int32_t** shape = nullptr;
+    uint32_t length = 2;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex,
+        shape, &length));
+}
+
+/**
+ * @tc.name: excutor_getoutputshape_003
+ * @tc.desc: Verify the *shape is not nullptr of the OH_NNExecutor_GetOutputShape function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_003, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    SetInputAndOutput(executor);
+
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    int32_t** shape = &ptr;
+    uint32_t length = 2;
+    uint32_t outputIndex = 0;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex,
+        shape, &length));
+}
+
+/**
+ * @tc.name: excutor_getoutputshape_004
+ * @tc.desc: Verify the length is nullptr of the OH_NNExecutor_GetOutputShape function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_004, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    SetInputAndOutput(executor);
+
+    int32_t* ptr = nullptr;
+    int32_t** shape = &ptr;
+    uint32_t outputIndex = 0;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, shape, nullptr));
+}
+
+/**
+ * @tc.name: excutor_getoutputshape_005
+ * @tc.desc: Verify the success of the OH_NNExecutor_GetOutputShape function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_005, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    SetInputAndOutput(executor);
+
+    int32_t* ptr = nullptr;
+    int32_t** shape = &ptr;
+    uint32_t length = 2;
+    uint32_t outputIndex = 0;
+    EXPECT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, shape, &length));
+}
+
+/**
+ * @tc.name: excutor_run_001
+ * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_Run function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_run_001, testing::ext::TestSize.Level0)
+{
+    OH_NNExecutor* nnExecutor = nullptr;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_Run(nnExecutor));
+}
+
+/**
+ * @tc.name: excutor_run_002
+ * @tc.desc: Verify the success of the OH_NNExecutor_Run function
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_run_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t index = 0;
+    size_t length = 9 * sizeof(int32_t);
+    float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void *buffer = input;
+
+    SetTensor();
+    EXPECT_EQ(OH_NN_SUCCESS, executor.SetInput(index, m_tensor, buffer, length));
+    EXPECT_EQ(OH_NN_SUCCESS, executor.SetOutput(index, buffer, length));
+    EXPECT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(nnExecutor));
+}
+
+/*
+ * @tc.name: executor_allocate_input_memory_001
+ * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_AllocateInputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_001, testing::ext::TestSize.Level0)
+{
+    OH_NNExecutor* nnExecutor = nullptr;
+    uint32_t outputIndex = 0;
+    size_t length = 9 * sizeof(float);
+
+    OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, outputIndex, length);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/*
+ * @tc.name: executor_allocate_input_memory_002
+ * @tc.desc: Verify the passed length equals 0 of the OH_NNExecutor_AllocateInputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t outputIndex = 0;
+    size_t length = 0;
+
+    OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, outputIndex, length);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/*
+ * @tc.name: executor_allocate_input_memory_003
+ * @tc.desc: Verify the error when creating input memory in executor of the OH_NNExecutor_AllocateInputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_003, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t outputIndex = 6;
+    size_t length = 9 * sizeof(float);
+
+    OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, outputIndex, length);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/*
+ * @tc.name: executor_allocate_input_memory_004
+ * @tc.desc: Verify the success of the OH_NNExecutor_AllocateInputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_004, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t outputIndex = 0;
+    size_t length = 9 * sizeof(float);
+
+    OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, outputIndex, length);
+    EXPECT_NE(nullptr, ret);
+}
+
+/*
+ * @tc.name: executor_allocate_output_memory_001
+ * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_AllocateOutputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_001, testing::ext::TestSize.Level0)
+{
+    OH_NNExecutor* nnExecutor = nullptr;
+    uint32_t outputIndex = 0;
+    size_t length = 9 * sizeof(float);
+
+    OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/*
+ * @tc.name: executor_allocate_output_memory_002
+ * @tc.desc: Verify the passed length equals 0 of the OH_NNExecutor_AllocateOutputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t outputIndex = 0;
+    size_t length = 0;
+
+    OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/*
+ * @tc.name: executor_allocate_output_memory_003
+ * @tc.desc: Verify the error when create output memory in executor of the OH_NNExecutor_AllocateOutputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_003, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t outputIndex = 6;
+    size_t length = 9 * sizeof(float);
+
+    OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/*
+ * @tc.name: executor_allocate_output_memory_004
+ * @tc.desc: Verify the success of the OH_NNExecutor_AllocateOutputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_004, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t outputIndex = 0;
+    size_t length = 9 * sizeof(float);
+
+    OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length);
+    EXPECT_NE(nullptr, ret);
+}
+
+
+/*
+ * @tc.name: executor_destroy_input_memory_001
+ * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_DestroyInputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_001, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    BuildModelGraph(innerModel);
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = nullptr;
+
+    uint32_t inputIndex = 0;
+    float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* const data = dataArry;
+    OH_NN_Memory memory = {data, 9 * sizeof(float)};
+    OH_NN_Memory* pMemory = &memory;
+    size_t length = 9 * sizeof(float);
+    EXPECT_EQ(OH_NN_SUCCESS, executor.CreateInputMemory(inputIndex, length, &pMemory));
+    OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, &pMemory);
+    EXPECT_EQ(nullptr, nnExecutor);
+}
+
+/*
+ * @tc.name: executor_destroy_input_memory_002
+ * @tc.desc: Verify the memory is nullptr of the OH_NNExecutor_DestroyInputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    BuildModelGraph(innerModel);
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t inputIndex = 0;
+    OH_NN_Memory** memory = nullptr;
+    OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, memory);
+    EXPECT_EQ(nullptr, memory);
+}
+
+/*
+ * @tc.name: executor_destroy_input_memory_003
+ * @tc.desc: Verify the *memory is nullptr of the OH_NNExecutor_DestroyInputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_003, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    BuildModelGraph(innerModel);
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t inputIndex = 0;
+    OH_NN_Memory* memory = nullptr;
+    OH_NN_Memory** pMemory = &memory;
+    OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, pMemory);
+    EXPECT_EQ(nullptr, memory);
+}
+
+/*
+ * @tc.name: executor_destroy_input_memory_004
+ * @tc.desc: Verify the error happened when destroying input memory of the OH_NNExecutor_DestroyInputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_004, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    BuildModelGraph(innerModel);
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t inputIndex = 6;
+    float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* const data = dataArry;
+    OH_NN_Memory memory = {data, 9 * sizeof(float)};
+    OH_NN_Memory* pMemory = &memory;
+    OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, &pMemory);
+    EXPECT_NE(nullptr, pMemory);
+}
+
+/*
+ * @tc.name: executor_destroy_input_memory_005
+ * @tc.desc: Verify the success of the OH_NNExecutor_DestroyInputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_005, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    BuildModelGraph(innerModel);
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t inputIndex = 0;
+    float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* const data = dataArry;
+    OH_NN_Memory memory = {data, 9 * sizeof(float)};
+    OH_NN_Memory* pMemory = &memory;
+    size_t length = 9 * sizeof(float);
+    EXPECT_EQ(OH_NN_SUCCESS, executor.CreateInputMemory(inputIndex, length, &pMemory));
+    OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, &pMemory);
+    EXPECT_EQ(nullptr, pMemory);
+}
+
+/*
+ * @tc.name: executor_destroy_output_memory_001
+ * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_DestroyOutputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_001, testing::ext::TestSize.Level0)
+{
+    OH_NNExecutor* nnExecutor = nullptr;
+    uint32_t outputIndex = 0;
+    float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* const data = dataArry;
+    OH_NN_Memory memory = {data, 9 * sizeof(float)};
+    OH_NN_Memory* pMemory = &memory;
+    OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, &pMemory);
+    EXPECT_EQ(nullptr, nnExecutor);
+}
+
+/*
+ * @tc.name: executor_destroy_output_memory_002
+ * @tc.desc: Verify the memory is nullptr of the OH_NNExecutor_DestroyOutputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t outputIndex = 0;
+    OH_NN_Memory** memory = nullptr;
+    OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, memory);
+    EXPECT_EQ(nullptr, memory);
+}
+
+/*
+ * @tc.name: executor_destroy_output_memory_003
+ * @tc.desc: Verify the *memory is nullptr of the OH_NNExecutor_DestroyOutputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_003, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t outputIndex = 0;
+    OH_NN_Memory* memory = nullptr;
+    OH_NN_Memory** pMemory = &memory;
+    OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, pMemory);
+    EXPECT_EQ(nullptr, memory);
+}
+
+/*
+ * @tc.name: executor_destroy_output_memory_004
+ * @tc.desc: Verify the error happened when destroying output memory of the OH_NNExecutor_DestroyOutputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_004, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t outputIndex = 6;
+    float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* const data = dataArry;
+    OH_NN_Memory memory = {data, 9 * sizeof(float)};
+    OH_NN_Memory* pMemory = &memory;
+    OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, &pMemory);
+    EXPECT_NE(nullptr, pMemory);
+}
+
+/*
+ * @tc.name: executor_destroy_output_memory_005
+ * @tc.desc: Verify the success of the OH_NNExecutor_DestroyOutputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_005, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* const data = dataArry;
+    OH_NN_Memory memory = {data, 9 * sizeof(float)};
+    OH_NN_Memory* pMemory = &memory;
+    size_t length = 9 * sizeof(float);
+    uint32_t outputIndex = 0;
+    EXPECT_EQ(OH_NN_SUCCESS, executor.CreateOutputMemory(outputIndex, length, &pMemory));
+    OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, &pMemory);
+    EXPECT_EQ(nullptr, pMemory);
+}
+
+/*
+ * @tc.name: executor_set_input_with_memory_001
+ * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_SetInputWithMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_001, testing::ext::TestSize.Level0)
+{
+    OH_NNExecutor* nnExecutor = nullptr;
+
+    SetTensor();
+
+    uint32_t inputIndex = 0;
+    float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* const data = dataArry;
+    OH_NN_Memory memory = {data, 9 * sizeof(float)};
+
+    OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, &m_tensor, &memory);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: executor_set_input_with_memory_002
+ * @tc.desc: Verify the operand is nullptr of the OH_NNExecutor_SetInputWithMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    OH_NN_Tensor* operand = nullptr;
+
+    uint32_t inputIndex = 0;
+    float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* const data = dataArry;
+    OH_NN_Memory memory = {data, 9 * sizeof(float)};
+
+    OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, operand, &memory);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: executor_set_input_with_memory_003
+ * @tc.desc: Verify the memory is nullptr of the OH_NNExecutor_SetInputWithMemory function.
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetTensor(); + + uint32_t inputIndex = 0; + OH_NN_Memory* memory = nullptr; + OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, &m_tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_with_memory_004 + * @tc.desc: Verify the success of the OH_NNExecutor_SetInputWithMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetTensor(); + + uint32_t inputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, &m_tensor, &memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + + +/* + * @tc.name: executor_set_output_with_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_SetOutputWithMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + uint32_t outputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_ReturnCode ret = OH_NNExecutor_SetOutputWithMemory(nnExecutor, outputIndex, &memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_with_memory_002 + * @tc.desc: Verify the memory is nullptr of the OH_NNExecutor_SetOutputWithMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + OH_NN_Memory* memory = nullptr; + OH_NN_ReturnCode ret = OH_NNExecutor_SetOutputWithMemory(nnExecutor, outputIndex, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_with_memory_003 + * @tc.desc: Verify the success of the OH_NNExecutor_SetOutputWithMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_ReturnCode ret = OH_NNExecutor_SetOutputWithMemory(nnExecutor, outputIndex, &memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_destroy_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor** pExecutor = nullptr; + OH_NNExecutor_Destroy(pExecutor); + EXPECT_EQ(nullptr, pExecutor); +} + +/* + * @tc.name: executor_destroy_002 + * @tc.desc: Verify the *OH_NNExecutor is nullptr of the OH_NNExecutor_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_002, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + OH_NNExecutor** pExecutor = &nnExecutor; + OH_NNExecutor_Destroy(pExecutor); + EXPECT_EQ(nullptr, nnExecutor); +} + +/* + * @tc.name: executor_destroy_003 + * @tc.desc: Verify the normal model of the OH_NNExecutor_Destroy function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_003, testing::ext::TestSize.Level0) +{ + InnerModel* innerModel = new InnerModel(); + EXPECT_NE(nullptr, innerModel); + Compilation* innerCompilation = new(std::nothrow) Compilation(innerModel); + EXPECT_NE(nullptr, innerCompilation); + Executor* executor = new(std::nothrow) Executor(innerCompilation); + EXPECT_NE(nullptr, executor); + + OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor); + OH_NNExecutor_Destroy(&nnExecutor); + EXPECT_EQ(nullptr, nnExecutor); +} + +/* + * @tc.name: device_get_all_devices_id_001 + * @tc.desc: Verify the allDevicesID is nullptr of the OH_NNDevice_GetAllDevicesID function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_001, testing::ext::TestSize.Level0) +{ + const size_t** allDevicesId = nullptr; + uint32_t deviceCount = 1; + uint32_t* pDeviceCount = &deviceCount; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(allDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_all_devices_id_002 + * @tc.desc: Verify the *allDevicesID is not nullptr of the OH_NNDevice_GetAllDevicesID function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_002, testing::ext::TestSize.Level0) +{ + const size_t devicesId = 1; + const size_t* allDevicesId = &devicesId; + const size_t** pAllDevicesId = &allDevicesId; + uint32_t deviceCount = 1; + uint32_t* pDeviceCount = &deviceCount; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(pAllDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_all_devices_id_003 + * @tc.desc: Verify the deviceCount is nullptr of the OH_NNDevice_GetAllDevicesID function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_003, testing::ext::TestSize.Level0) +{ + const size_t* allDevicesId = nullptr; + const size_t** pAllDevicesId = &allDevicesId; + uint32_t* pDeviceCount = nullptr; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(pAllDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_all_devices_id_004 + * @tc.desc: Verify the get no device of the OH_NNDevice_GetAllDevicesID function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_004, testing::ext::TestSize.Level0) +{ + const size_t* allDevicesId = nullptr; + const size_t** pAllDevicesId = &allDevicesId; + uint32_t deviceCount = 1; + uint32_t* pDeviceCount = &deviceCount; + OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(pAllDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: device_get_all_devices_id_005 + * @tc.desc: Verify the success of the OH_NNDevice_GetAllDevicesID function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_005, testing::ext::TestSize.Level0) +{ + const size_t* allDevicesId = nullptr; + const size_t** pAllDevicesId = &allDevicesId; + uint32_t deviceCount = 1; + uint32_t* pDeviceCount = &deviceCount; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(pAllDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: device_get_name_001 + * @tc.desc: Verify the name is nullptr of the OH_NNDevice_GetName function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_name_001, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + const char **name = nullptr; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, name); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_name_002 + * @tc.desc: Verify the *name is not nullptr of the OH_NNDevice_GetName function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_name_002, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + const char* name = "diviceId"; + const char** pName = &name; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, pName); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_name_003 + * @tc.desc: Verify the error happened when getting name of deviceID of the OH_NNDevice_GetName function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_name_003, testing::ext::TestSize.Level0) +{ + size_t deviceID = 0; + const char* name = nullptr; + const char** pName = &name; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, pName); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: device_get_name_004 + * @tc.desc: Verify the success of the OH_NNDevice_GetName function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_name_004, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + const char* name = nullptr; + const char** pName = &name; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, pName); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: device_get_type_001 + * @tc.desc: Verify the device is nullptr of the OH_NNDevice_GetType function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_type_001, testing::ext::TestSize.Level0) +{ + size_t deviceID = 0; + OH_NN_DeviceType deviceType = OH_NN_CPU; + OH_NN_DeviceType* pDeviceType = &deviceType; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, pDeviceType); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_type_002 + * @tc.desc: Verify the OH_NN_DeviceType is nullptr of the OH_NNDevice_GetType function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_type_002, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + OH_NN_DeviceType* pDeviceType = nullptr; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, pDeviceType); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_type_003 + * @tc.desc: Verify the error happened when getting name of deviceID of the OH_NNDevice_GetType function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_type_003, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + OH_NN_DeviceType deviceType = OH_NN_OTHERS; + OH_NN_DeviceType* pDeviceType = &deviceType; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, pDeviceType); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, ret); +} + +/* + * @tc.name: device_get_type_004 + * @tc.desc: Verify the success of the OH_NNDevice_GetType function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_type_004, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + OH_NN_DeviceType deviceType = OH_NN_CPU; + OH_NN_DeviceType* pDeviceType = &deviceType; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, pDeviceType); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} +} // namespace Unittest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/v2_0/neural_network_runtime_test/neural_network_runtime_test.h b/test/unittest/components/v2_0/neural_network_runtime_test/neural_network_runtime_test.h new file mode 100644 index 0000000000000000000000000000000000000000..61f1ed2b157af6f9b546b11c490770971a984ba4 --- /dev/null +++ b/test/unittest/components/v2_0/neural_network_runtime_test/neural_network_runtime_test.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_UNITTEST_H +#define NEURAL_NETWORK_RUNTIME_UNITTEST_H + +#include <gtest/gtest.h> + +#include "interfaces/kits/c/neural_network_runtime.h" +#include "frameworks/native/inner_model.h" +#include "frameworks/native/executor.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Unittest { +class NeuralNetworkRuntimeTest : public testing::Test { +public: + OH_NN_ReturnCode BuildModelGraph(InnerModel& innerModel); + void InitIndices(); + void AddModelTensor(InnerModel& innerModel); + void SetInnerBuild(InnerModel& innerModel); + void SetExecutor(Executor& executor); + void SetInputAndOutput(Executor& executor); + void SetTensor(); + +public: + OH_NN_UInt32Array m_inputIndices; + OH_NN_UInt32Array m_outputIndices; + OH_NN_UInt32Array m_paramIndices; + OH_NN_Tensor m_tensor; + + uint32_t m_inputIndexs[2]{0, 1}; + uint32_t m_outputIndexs[1]{2}; + uint32_t m_paramIndexs[1]{3}; +}; +} // namespace Unittest +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_UNITTEST_H diff --git a/test/unittest/components/v2_0/transform/transform_test.cpp b/test/unittest/components/v2_0/transform/transform_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..94c6bf984f89e4e1a2c10c436acb3037057d169f --- /dev/null +++ b/test/unittest/components/v2_0/transform/transform_test.cpp @@ -0,0 +1,516 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <gtest/gtest.h> +#include <gmock/gmock.h> + +#include "frameworks/native/transform.h" +#include "frameworks/native/memory_manager.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class TransformTestTest : public testing::Test { +public: + TransformTestTest() = default; + ~TransformTestTest() = default; +}; + +/** + * @tc.name: transform_gettypesize_001 + * @tc.desc: Verify the TransIOTensor function return 1. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_gettypesize_001, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_BOOL; + uint32_t result = GetTypeSize(dataType); + EXPECT_EQ(static_cast<uint32_t>(1), result); +} + +/** + * @tc.name: transform_gettypesize_002 + * @tc.desc: Verify the TransIOTensor function return 2. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_gettypesize_002, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT16; + uint32_t result = GetTypeSize(dataType); + EXPECT_EQ(static_cast<uint32_t>(2), result); +} + +/** + * @tc.name: transform_gettypesize_003 + * @tc.desc: Verify the TransIOTensor function return 4. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_gettypesize_003, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT32; + uint32_t result = GetTypeSize(dataType); + EXPECT_EQ(static_cast<uint32_t>(4), result); +} + +/** + * @tc.name: transform_gettypesize_004 + * @tc.desc: Verify the TransIOTensor function return 8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_gettypesize_004, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT64; + uint32_t result = GetTypeSize(dataType); + EXPECT_EQ(static_cast<uint32_t>(8), result); +} + +/** + * @tc.name: transform_gettypesize_005 + * @tc.desc: Verify the TransIOTensor function return 0. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_gettypesize_005, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UNKNOWN; + uint32_t result = GetTypeSize(dataType); + EXPECT_EQ(static_cast<uint32_t>(0), result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_001 + * @tc.desc: Verify the TransIOTensor function return DATA_TYPE_BOOL. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_001, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_BOOL; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_BOOL, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_002 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_002, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT8; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_INT8, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_003 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_003, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT16; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_INT16, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_004 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT32. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_004, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT32; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_INT32, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_005 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_005, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT64; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_INT64, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_006 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_006, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT8; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UINT8, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_007 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_007, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT16; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UINT16, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_008 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT32. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_008, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT32; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UINT32, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_009 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_009, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT64; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UINT64, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_010 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_010, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT16; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_FLOAT16, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_011 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_011, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT32; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_FLOAT32, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_012 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UNKNOWN. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_012, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UNKNOWN; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UNKNOWN, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_013 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT64 + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_013, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT64; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_FLOAT64, result); +} + +/** + * @tc.name: transform_nntoms_transformformat_001 + * @tc.desc: Verify the TransFormat function return FORMAT_NCHW. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformformat_001, TestSize.Level0) +{ + OH_NN_Format format = OH_NN_FORMAT_NCHW; + mindspore::lite::Format result = NNToMS::TransformFormat(format); + EXPECT_EQ(mindspore::lite::FORMAT_NCHW, result); +} + +/** + * @tc.name: transform_nntoms_transformformat_002 + * @tc.desc: Verify the TransFormat function return FORMAT_NHWC. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformformat_002, TestSize.Level0) +{ + OH_NN_Format format = OH_NN_FORMAT_NHWC; + mindspore::lite::Format result = NNToMS::TransformFormat(format); + EXPECT_EQ(mindspore::lite::FORMAT_NHWC, result); +} + +/** + * @tc.name: transform_nntoms_transformformat_003 + * @tc.desc: Verify the TransFormat function return FORMAT_NHWC. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformformat_003, TestSize.Level0) +{ + OH_NN_Format format = OH_NN_FORMAT_NONE; + mindspore::lite::Format result = NNToMS::TransformFormat(format); + EXPECT_EQ(mindspore::lite::FORMAT_NHWC, result); +} + +/** + * @tc.name: transform_nntoms_transformfusiontype_001 + * @tc.desc: Verify the TransFormat function return ACTIVATION_TYPE_NO_ACTIVATION. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformfusiontype_001, TestSize.Level0) +{ + OH_NN_FuseType type = OH_NN_FUSED_NONE; + mindspore::lite::ActivationType result = NNToMS::TransfromFusionType(type); + EXPECT_EQ(mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION, result); +} + +/** + * @tc.name: transform_nntoms_transformfusiontype_002 + * @tc.desc: Verify the TransFormat function return ACTIVATION_TYPE_RELU. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformfusiontype_002, TestSize.Level0) +{ + OH_NN_FuseType type = OH_NN_FUSED_RELU; + mindspore::lite::ActivationType result = NNToMS::TransfromFusionType(type); + EXPECT_EQ(mindspore::lite::ACTIVATION_TYPE_RELU, result); +} + +/** + * @tc.name: transform_nntoms_transformfusiontype_003 + * @tc.desc: Verify the TransFormat function return ACTIVATION_TYPE_RELU6. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformfusiontype_003, TestSize.Level0) +{ + OH_NN_FuseType type = OH_NN_FUSED_RELU6; + mindspore::lite::ActivationType result = NNToMS::TransfromFusionType(type); + EXPECT_EQ(mindspore::lite::ACTIVATION_TYPE_RELU6, result); +} + +/** + * @tc.name: transform_nntoms_transformquanttype_001 + * @tc.desc: Verify the TransFormat function return QUANT_TYPE_NONE. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformquanttype_001, TestSize.Level0) +{ + OHOS::NeuralNetworkRuntime::Ops::OpsQuantType type = OHOS::NeuralNetworkRuntime::Ops::OpsQuantType::QUANT_NONE; + mindspore::lite::QuantType result = NNToMS::TransformQuantType(type); + EXPECT_EQ(mindspore::lite::QUANT_TYPE_NONE, result); +} + +/** + * @tc.name: transform_nntoms_transformquanttype_002 + * @tc.desc: Verify the TransFormat function return QUANT_TYPE_ALL. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformquanttype_002, TestSize.Level0) +{ + OHOS::NeuralNetworkRuntime::Ops::OpsQuantType type = OHOS::NeuralNetworkRuntime::Ops::OpsQuantType::QUANT_ALL; + mindspore::lite::QuantType result = NNToMS::TransformQuantType(type); + EXPECT_EQ(mindspore::lite::QUANT_TYPE_ALL, result); +} + + +/** + * @tc.name: transform_mstonn_transformdatatype_001 + * @tc.desc: Verify the TransIOTensor function return OH_NN_BOOL. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_001, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_BOOL; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_BOOL, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_002 + * @tc.desc: Verify the TransDataType function return OH_NN_INT8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_002, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_INT8; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_INT8, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_003 + * @tc.desc: Verify the TransDataType function return OH_NN_INT16. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_003, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_INT16; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_INT16, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_004 + * @tc.desc: Verify the TransDataType function return OH_NN_INT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_004, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_INT32; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_INT32, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_005 + * @tc.desc: Verify the TransDataType function return OH_NN_INT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_005, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_INT64; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_INT64, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_006 + * @tc.desc: Verify the TransDataType function return OH_NN_UINT8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_006, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UINT8; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UINT8, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_007 + * @tc.desc: Verify the TransDataType function return OH_NN_UINT16. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_007, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UINT16; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UINT16, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_008 + * @tc.desc: Verify the TransDataType function return OH_NN_UINT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_008, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UINT32; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UINT32, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_009 + * @tc.desc: Verify the TransDataType function return OH_NN_UINT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_009, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UINT64; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UINT64, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_010 + * @tc.desc: Verify the TransDataType function return OH_NN_FLOAT16 + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_010, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_FLOAT16; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_FLOAT16, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_011 + * @tc.desc: Verify the TransDataType function return OH_NN_FLOAT32. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_011, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_FLOAT32; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_FLOAT32, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_012 + * @tc.desc: Verify the TransDataType function return OH_NN_UNKNOWN. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_012, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UNKNOWN; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UNKNOWN, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_013 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT64 + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_013, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_FLOAT64; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_FLOAT64, result); +} + +/** + * @tc.name: transform_mstonn_transformquantparams_001 + * @tc.desc: Verify the TransformQuantParams function. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformquantparams_001, TestSize.Level0) +{ + std::vector<mindspore::lite::QuantParam> msQuantParams = {{1, 1.0, 8}}; + std::vector<QuantParam> result = MSToNN::TransformQuantParams(msQuantParams); + EXPECT_EQ(msQuantParams.size(), result.size()); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS