diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..335cc7f2197b4f3e80c61c24f86a5091fed5920d --- /dev/null +++ b/.gitignore @@ -0,0 +1,20 @@ +# ignore .a .o +*.[ao] +*.so +*.ms + +# ignore build +build/ +libs/ +obj/ +_ignore/ + +# ignore schema +jni/include/schema/ + +# ignore vscode +.vscode + +# ignore runtime file +deploy.bat +log.txt \ No newline at end of file diff --git a/BUILD.gn b/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..36de2f911be63bc22a08c0277bf7e53ecae370e3 --- /dev/null +++ b/BUILD.gn @@ -0,0 +1,28 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build/ohos.gni") + +group("nnrt_target") { + deps = [ + "frameworks:libneural_network_runtime" + ] +} + +group("nnrt_test_target") { + testonly = true + deps = [ + "test/unittest:unittest", + "test/system_test:system_test" + ] +} diff --git a/LICENSE b/LICENSE index 29f81d812f3e768fa89638d1f72920dbfd1413a8..4a459866a57c25462afad17f3fe0b50d440da080 100644 --- a/LICENSE +++ b/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -173,29 +174,4 @@ incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
+ END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/OAT.xml b/OAT.xml new file mode 100644 index 0000000000000000000000000000000000000000..8c1351e64115b03242d96e3fff2a722b1d4b5c11 --- /dev/null +++ b/OAT.xml @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + diff --git a/README.en.md b/README.en.md deleted file mode 100644 index d2337089f2a3a14e80d163982b0ddb7d16e6ba67..0000000000000000000000000000000000000000 --- a/README.en.md +++ /dev/null @@ -1,36 +0,0 @@ -# neural_network_runtime - -#### Description -{**When you're done, you can delete the content in this README and update the file with details for others getting started with your repository**} - -#### Software Architecture -Software architecture description - -#### Installation - -1. xxxx -2. xxxx -3. xxxx - -#### Instructions - -1. xxxx -2. xxxx -3. xxxx - -#### Contribution - -1. Fork the repository -2. Create Feat_xxx branch -3. Commit your code -4. Create Pull Request - - -#### Gitee Feature - -1. You can use Readme\_XXX.md to support different languages, such as Readme\_en.md, Readme\_zh.md -2. Gitee blog [blog.gitee.com](https://blog.gitee.com) -3. Explore open source project [https://gitee.com/explore](https://gitee.com/explore) -4. The most valuable open source project [GVP](https://gitee.com/gvp) -5. The manual of Gitee [https://gitee.com/help](https://gitee.com/help) -6. The most popular members [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/) diff --git a/README.md b/README.md deleted file mode 100644 index dc3cc3646c1b1d1fc42f2ee1ddc8a80a66d5592f..0000000000000000000000000000000000000000 --- a/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# neural_network_runtime - -#### 介绍 -{**以下是 Gitee 平台说明,您可以替换此简介** -Gitee 是 OSCHINA 推出的基于 Git 的代码托管平台(同时支持 SVN)。专为开发者提供稳定、高效、安全的云端软件开发协作平台 -无论是个人、团队、或是企业,都能够用 Gitee 实现代码托管、项目管理、协作开发。企业项目请看 [https://gitee.com/enterprises](https://gitee.com/enterprises)} - -#### 软件架构 -软件架构说明 - - -#### 安装教程 - -1. xxxx -2. xxxx -3. xxxx - -#### 使用说明 - -1. xxxx -2. xxxx -3. xxxx - -#### 参与贡献 - -1. Fork 本仓库 -2. 新建 Feat_xxx 分支 -3. 提交代码 -4. 新建 Pull Request - - -#### 特技 - -1. 使用 Readme\_XXX.md 来支持不同的语言,例如 Readme\_en.md, Readme\_zh.md -2. Gitee 官方博客 [blog.gitee.com](https://blog.gitee.com) -3. 你可以 [https://gitee.com/explore](https://gitee.com/explore) 这个地址来了解 Gitee 上的优秀开源项目 -4. [GVP](https://gitee.com/gvp) 全称是 Gitee 最有价值开源项目,是综合评定出的优秀开源项目 -5. Gitee 官方提供的使用手册 [https://gitee.com/help](https://gitee.com/help) -6. 
Gitee 封面人物是一档用来展示 Gitee 会员风采的栏目 [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/) diff --git a/README_zh.md b/README_zh.md new file mode 100644 index 0000000000000000000000000000000000000000..720bf3c0e49f4386ff954a2d89550536df15772e --- /dev/null +++ b/README_zh.md @@ -0,0 +1,26 @@ +# Neural Network Runtime + +Neural Network Runtime(神经网络运行时)是一套面向AI领域的运行时部件,适配上层AI推理引擎和底层加速芯片,为端侧AI推理引擎提供硬件加速的计算能力。 + +## 基本概念 + +在开发前,需要先了解以下概念,以便更好地理解全文内容: + +- Native API:Openharmony 面向应用开发者的C语言接口。 +- HDI:Hardware Device Interface,硬件设备接口,是OpenHarmony中系统组件与芯片组件通信的接口。关于更多HDI的细节,请浏览[驱动子系统](https://gitee.com/openharmony/docs/blob/master/zh-cn/readme/%E9%A9%B1%E5%8A%A8%E5%AD%90%E7%B3%BB%E7%BB%9F.md)。 + +## 运作机制 + +**图1** Neural Network Runtime架构图 +!["Neural Network Runtime架构图"](neural_network_runtime_intro.png) + +如图1所示,在OpenHarmony系统上,AI应用通常要经过AI推理引擎和Neural Network Runtime才能对接底层芯片驱动,进而加速推理计算。Neural Network Runtime和芯片驱动直接通过HDI接口交互,Neural Network Runtime将模型和数据传递给芯片驱动,通过HDI接口在加速芯片上执行推理计算,计算结果通过Neural Network Runtime、AI推理引擎逐层返回至AI应用。 + +通常,AI应用、AI推理引擎、Neural Network Runtime处在同一个进程下,芯片驱动运行在另一个进程下,两者之间需要借助进程间通信(IPC)传递模型和计算数据。Neural Network Runtime根据HDI接口实现了HDI客户端,相应的,芯片厂商需要根据HDI接口实现并开放HDI服务。 + +架构图中每层功能简单阐述如下: +- AI应用:借助AI模型,提供丰富的应用能力,如:图像分类、人脸识别、文字识别等。 +- AI推理引擎:为AI应用提供模型搭建、模型优化、推理计算的能力。 +- Neural Network Runtime:作为AI推理引擎和底层加速芯片的桥梁,它开放了标准统一的HDI接口,不同的芯片都可以通过HDI接口接入Neural Network Runtime。 +- HDI服务端:HDI服务端接收Neural Network Runtime传入的模型,将模型转换为加速芯片驱动所使用模型格式,并调用芯片驱动的接口执行计算。 +- 加速芯片:加速芯片通常能够加速AI模型或者模型中部分算子的计算,提供优于CPU的性能。 \ No newline at end of file diff --git a/bundle.json b/bundle.json new file mode 100644 index 0000000000000000000000000000000000000000..9f69a3eb16e558ee5a8e3ba135a766e5d5fc7fd0 --- /dev/null +++ b/bundle.json @@ -0,0 +1,50 @@ +{ + "name": "@ohos/neural_network_runtime", + "description": "The Neural Network Runtime that bridges the inference framework and the device accelerator.", + "version": "3.2", + "license": "MIT", + "publishAs": "code-segment", + "segment": { + "destPath": "foundation/ai/neural_network_runtime" + }, + "dirs": {}, + "scripts": {}, + "licensePath": "COPYING", + "readmePath": { + "en": "README.rst" + }, + "component": { + "name": "neural_network_runtime", + "subsystem": "ai", + "syscap": [], + "features": [], + "adapted_system_type": ["standard"], + "rom": "1024KB", + "ram": "2048KB", + "deps": { + "components": [ + "hilog" + ], + "third_party": [] + }, + "build": { + "sub_component": [ + "//foundation/ai/neural_network_runtime:nnrt_target" + ], + "inner_kits": [ + {"type": "so", + "name": "//foundation/ai/neural_network_runtime:nnrt_target", + "header": { + "header_files": [ + "neural_network_runtime_inner.h" + ], + "header_base":"//foundation/ai/neural_network_runtime/interfaces/innerkits/c" + } + } + ], + "test": [ + "//foundation/ai/neural_network_runtime:nnrt_test_target" + ] + } + } +} \ No newline at end of file diff --git a/common/log.h b/common/log.h new file mode 100644 index 0000000000000000000000000000000000000000..c75b3255fb8a98976afed0f7a02ce15c3dc9ad6b --- /dev/null +++ b/common/log.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_LOG_H +#define NEURAL_NETWORK_RUNTIME_LOG_H + +#include +#include +#include "hilog/log_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef NNR_LOG_DOMAIN +#define NNR_LOG_DOMAIN 0xD002101 +#endif + +#define LOGD(...) HiLogPrint(LOG_CORE, LOG_DEBUG, NNR_LOG_DOMAIN, "NNRt", __VA_ARGS__) +#define LOGI(...) HiLogPrint(LOG_CORE, LOG_INFO, NNR_LOG_DOMAIN, "NNRt", __VA_ARGS__) +#define LOGW(...) HiLogPrint(LOG_CORE, LOG_WARN, NNR_LOG_DOMAIN, "NNRt", __VA_ARGS__) +#define LOGE(...) HiLogPrint(LOG_CORE, LOG_ERROR, NNR_LOG_DOMAIN, "NNRt", __VA_ARGS__) +#define LOGF(...) HiLogPrint(LOG_CORE, LOG_FATAL, NNR_LOG_DOMAIN, "NNRt", __VA_ARGS__) + +#ifdef __cplusplus +} +#endif + +#endif // NEURAL_NETWORK_RUNTIME_LOG_H diff --git a/common/scoped_trace.h b/common/scoped_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..650503e623a3f6b870c736673e78118c82274d95 --- /dev/null +++ b/common/scoped_trace.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_SCOPED_TRACE_H +#define NEURAL_NETWORK_RUNTIME_SCOPED_TRACE_H + +#include +#include "hitrace/trace.h" + +#define NNRT_TRACE_NAME(name) ScopedTrace ___tracer(name) +namespace OHOS { +namespace NeuralNetworkRuntime { +class ScopedTrace { +public: + inline ScopedTrace(const std::string& name) + { + m_name = name; + HiviewDFX::HiTraceId traceId = HiviewDFX::HiTraceChain::GetId(); + if (traceId.IsValid()) { + HiviewDFX::HiTraceChain::Tracepoint(HITRACE_TP_GENERAL, traceId, "NNRt Trace start: %s", name.c_str()); + } + } + + inline ~ScopedTrace() + { + HiviewDFX::HiTraceId traceId = HiviewDFX::HiTraceChain::GetId(); + if (traceId.IsValid()) { + HiviewDFX::HiTraceChain::Tracepoint(HITRACE_TP_GENERAL, traceId, "NNRt Trace end: %s", m_name.c_str()); + } + } + +private: + std::string m_name {}; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SCOPED_TRACE_H diff --git a/common/utils.h b/common/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..7430ad085b5b92eb86e7b053ccf3cbf84f1cb050 --- /dev/null +++ b/common/utils.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_UTILS_H +#define NEURAL_NETWORK_RUNTIME_UTILS_H + +#include + +#include "log.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +template +std::shared_ptr CreateSharedPtr(Args&&... args) +{ + std::shared_ptr tPtr = nullptr; + try { + tPtr = std::make_shared(args...); + } catch (const std::bad_alloc& except) { + LOGW("Create a new shared pointer failed. Error: %s", except.what()); + return nullptr; + } + return tPtr; +} + +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_UTILS_H diff --git a/example/deep_learning_framework/CMakeLists.txt b/example/deep_learning_framework/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..c3dbaddb12183afeb6c431775ddafe983a896816 --- /dev/null +++ b/example/deep_learning_framework/CMakeLists.txt @@ -0,0 +1,29 @@ +# +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# CMake lowest version requirement +cmake_minimum_required(VERSION 3.16.5) + +# project information +project(label_classification) + +set(CMAKE_C_FLAGS "-pthread -fstack-protector-all -fPIC -D_FORTIFY_SOURCE=2") + +# If the CPU architecture is 32-bit, -march=armv7-a +set(CMAKE_CXX_FLAGS "-pthread -fstack-protector-all -fPIC -D_FORTIFY_SOURCE=2 -march=armv7-a") +set(LOCAL_DIRECTORY_PATH ${PROJECT_SOURCE_DIR}) + +add_subdirectory(${LOCAL_DIRECTORY_PATH}/tflite) + diff --git a/example/deep_learning_framework/Flowchart.png b/example/deep_learning_framework/Flowchart.png new file mode 100644 index 0000000000000000000000000000000000000000..d6018c17a9116b72f2ebc10e38e39764d2ec0058 Binary files /dev/null and b/example/deep_learning_framework/Flowchart.png differ diff --git a/example/deep_learning_framework/Principle.png b/example/deep_learning_framework/Principle.png new file mode 100644 index 0000000000000000000000000000000000000000..0ef12d2b71f31949258eed17f55dd852f8307586 Binary files /dev/null and b/example/deep_learning_framework/Principle.png differ diff --git a/example/deep_learning_framework/README_zh.md b/example/deep_learning_framework/README_zh.md new file mode 100644 index 0000000000000000000000000000000000000000..ef8e58e655f3e6d25ef4f8aa871850551eca3007 --- /dev/null +++ b/example/deep_learning_framework/README_zh.md @@ -0,0 +1,218 @@ +# Tensorflow Lite 接入NNRt Delegate Demo开发指南 + +## 概述 + +### 功能简介 +- 神经网络运行时部件(NNRt)是跨设备的AI运行时框架,作为端侧推理框架和专用加速芯片的中间桥梁,为端侧推理框架提供了统一的Native接口; +- 本demo旨在介绍上层AI业务如何利用NNRt在专有芯片上加速推理,使能OpenHarmony社区生态; +- 本demo根据用户输入参数(模型、标签、模型输入shape、循环浮点推理次数、是否允许动态尺寸推理、以及是否打印结果等)完成标签分类模型推理,用户可通过打印信息观察在不同条件下的模型推理性能、精度等KIP。 + +### 基本概念 +在开发前,开发者需要先了解以下概念,以便更好地理解全文内容: +- NNRt: Neural Network Runtime,神经网络运行时,是本指导主要介绍的部件。 +- OHOS:OpenHarmony Operating System,开源鸿蒙操作系统。 + +### 约束与限制 +- 系统版本:OpenHarmonyOS 3.2及以上 +- 开发环境:Ubuntu 18.04及以上 +- 接入设备:OpenHarmony定义的标准设备 +- 其他开发依赖: + - tensorflow-lite.so及其依赖库,目前完成在tensorflow lite 2.6版本上的测试; + - NNRt库libneural_network_runtime.z.so; + - TensorFlow Lite头文件:https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite; + - mobilenetv2.tflite模型(https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz); + - 标签文件labels.txt; + - 测试图片grace_hopper.bmp; + +### 运作机制 +
+ + - 用户调用TFLite的BuildFromFile接口完成初始构图; + - 用户设置自定义的参数options,应用于创建NnrtDelegate; + - 用户创建DelegateProviders,并调用DelegateProviders的CreateAllRankedDelegates接口创建NnrtDelegate,创建NnrtDelegate过程中dlopen打开NNRt的动态库,并加载API,返回delegate; + - 用户调用ModifyGraphWithDelegate接口完成Node替换,其中该步分四个步骤; + - Initalize初始化NnrtDelegate; + - 判断图中各node是否支持NnrtDelegate,返回支持的node集合; + - 调用TFLiteRegistration注册NnrtDelegate,并初始化init, prepare, invoke成员函数指针,指向delegateKernel的Init, Prepare和run函数方法; + - 替换TensorFlow Delegate的node为已注册的NNrt delegate kernel, 并调用Init完成构图步骤; + - 用户调用AllocateTensors,完成内存分配和图编译,其中支持delegate的node会转到delegateKernel的prepare完成编译,不支持delegate的会调用原有tflite node的prepare编译; + - 用户调用Invoke完成图执行; + +### 开发流程 +
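+结合上文“运作机制”中列出的步骤,下面给出一段示意性的调用代码。该片段仅为流程草图:模型文件名、DelegateProviders 的创建方式等均为假设写法,实际接口请以本 demo 的 label_classify.cpp 及所用 TFLite 版本的头文件为准。
+```cpp
+#include "tensorflow/lite/interpreter.h"
+#include "tensorflow/lite/kernels/register.h"
+#include "tensorflow/lite/model.h"
+// DelegateProviders / NnrtDelegate 相关头文件见本 demo 源码,此处省略
+
+// 1. BuildFromFile 完成初始构图
+auto model = tflite::FlatBufferModel::BuildFromFile("mobilenetv2.tflite");
+tflite::ops::builtin::BuiltinOpResolver resolver;
+std::unique_ptr<tflite::Interpreter> interpreter;
+tflite::InterpreterBuilder(*model, resolver)(&interpreter);
+
+// 2~3. 设置自定义 options,并通过 DelegateProviders 创建 NnrtDelegate
+//      (创建过程中 dlopen 打开 NNRt 动态库并加载 API)
+DelegateProviders delegateProviders;
+auto delegates = delegateProviders.CreateAllRankedDelegates();
+
+// 4. ModifyGraphWithDelegate 完成 node 替换
+for (auto& delegate : delegates) {
+    interpreter->ModifyGraphWithDelegate(std::move(delegate.delegate));
+}
+
+// 5. AllocateTensors 完成内存分配和图编译;6. Invoke 完成图执行
+interpreter->AllocateTensors();
+interpreter->Invoke();
+```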
+ +### 开发步骤 +本节主要描述NNRt接入TFLite的TFLite-delegate代理机制,重点对TFLite调用delegate的流程和delegate对接NNRt的方式进行了介绍。 +TensorFlow Lite Delegate有两个基类DelegateProvider、TfLiteDelegate, 本节主要描述继承这两个基类得到子类NnrtDelegate和NnrtDelegateProvider。 + +本demo主要文件目录结构如下图: +```text +. +├── CMakeLists.txt +├── delegates +│   └── nnrt_delegate +│   ├── CMakeLists.txt +│   ├── nnrt_delegate.cpp +│   ├── nnrt_delegate.h +│   ├── nnrt_delegate_kernel.cpp +│   ├── nnrt_delegate_kernel.h +│   ├── nnrt_delegate_provider.cpp +│   ├── nnrt_op_builder.cpp +│   ├── nnrt_op_builder.h +│   ├── nnrt_utils.cpp +│   ├── nnrt_utils.h +│   └── tensor_mapping.h +├── label_classify +│   ├── CMakeLists.txt +│   ├── label_classify.cpp +│   └── label_classify.h +├── nnrt +│   ├── CMakeLists.txt +│   ├── nnrt_implementation.cpp +│   └── nnrt_implementation.h +└── tools + ├── bitmap_helpers.cpp + ├── bitmap_helpers.h + ├── get_topn.h + ├── log.h + ├── utils.cpp + └── utils.h +``` +1. 创建Tensorflow Lite NnrtDelegate类 + - Tensorflow Lite NNRt Delegate 使TensorFlow Lite模型能够运行在NNRt框架(https://gitee.com/openharmony/neural_network_runtime)上,这导致了在OHOS设备上更快的模型推理 + - nnrt_delegate依赖nnrt_delegate_kernel, nnrt_delegate_kernel(将支持替换的TensorFlow Lite模型中的operators替换成Nnrt中的operators)依赖nnrt_op_builder(给每个operators设置输入输出tensor和operation属性),完成nnrt_delegate的自定义。 + + +2. 创建NnrtDelegateProvider + - NnrtDelegateProvider依赖nnrt_implementation(用于加载libneural_network_runtime.z.so中的Api)和nnrt_delegate(用于创建子类NnrtDelegate对象),完成与TFLite的对接; + + - 注册NnrtDelegateProvider + ```cpp + REGISTER_DELEGATE_PROVIDER(NnrtDelegateProvider); + ``` + + - 创建CreateTfLiteDelegate主要有以下几步 + ```cpp + NnrtDelegate::Options options; + + const auto* nnrtImpl = NnrtImplementation(); + if (!nnrtImpl->nnrtExists) { + TFLITE_LOG(WARN) << "NNRT acceleration is unsupported on this platform."; + return delegate; + } + + Interpreter::TfLiteDelegatePtr TfLiteDelegatePtr(new (std::nothrow) NnrtDelegate(nnrtImpl, options), + [](TfLiteDelegate* delegate) { delete reinterpret_cast(delegate); }); + ``` + +3. label_classify.cpp中加载Nnrt_Delegate + ```cpp + interpreter->ModifyGraphWithDelegate(std::move(delegate.delegate)) + ``` + +### 调测命令 +1. 编译生成Tensorflow Lite库及其依赖库 + 请参考Tensorflow Lite交叉编译指南(https://www.tensorflow.org/lite/guide/build_cmake_arm), 同时在```tensorflow/lite/CMakeLists.txt```中增加以下内容: + ```text + # TODO: TFLite External Delegate + list(APPEND TFLITE_EXTERNAL_DELEGATE_SRC + ${TFLITE_SOURCE_DIR}/tools/delegates/delegate_provider.cc + # ${TFLITE_SOURCE_DIR}/tools/delegates/external_delegate_provider.cc + ${TFLITE_SOURCE_DIR}/tools/tool_params.cc + ${TFLITE_SOURCE_DIR}/tools/command_line_flags.cc + ) + ``` + ```text + target_link_libraries(tensorflow-lite + PUBLIC + Eigen3::Eigen + NEON_2_SSE + absl::flags + absl::hash + absl::status + absl::strings + absl::synchronization + absl::variant + farmhash + fft2d_fftsg2d + flatbuffers + gemmlowp + ruy + ${CMAKE_DL_LIBS} + ${TFLITE_TARGET_DEPENDENCIES} + ) + ``` +2. 编译生成NNRt库libneural_network_runtime.z.so + 请参考编译指导(https://gitee.com/openharmony/build),编译命令如下 + ```shell + ./build.sh --product-name rk3568 –ccache --jobs=16 --build-target=neural_network_runtime + ``` +3. 
用cmake编译北向demo + - 将TensorFlow Lite头文件(https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite)和编译生成的TensorFlow Lite库,分别放在```deep_learning_framework/lib_3rd_nnrt_tflite/include/tensorflow/lite/```和```deep_learning_framework/lib_3rd_nnrt_tflite/com/arm64-v8a/lib/```下; + - 指定ohos的cmake, ohos.toolchain.cmake路径,在```foundation/ai/neural_network_runtime/example/cmake_build/build_ohos_tflite.sh```中替换以下两行; + ```shell + ./tool_chain/native/build-tools/cmake/bin/cmake \ + -DCMAKE_TOOLCHAIN_FILE=./tool_chain/native/cmake_build/cmake/ohos.toolchain.cmake \ + ``` + - 进入```foundation/ai/neural_network_runtime/example/cmake_build```: + - 如果需要在arm32架构的CPU上运行: + - 修改```tflite/CMakeLists.txt``` + ```text + set(CMAKE_CXX_FLAGS "-pthread -fstack-protector-all -fPIC -D_FORTIFY_SOURCE=2 -march=armv7-a") + ``` + - 执行编译命令 + ```shell + bash build_ohos_tflite.sh armeabi-v7a + ``` + - 如果需要在arm64架构的CPU上运行: + - 修改```tflite/CMakeLists.txt``` + ```text + set(CMAKE_CXX_FLAGS "-pthread -fstack-protector-all -fPIC -D_FORTIFY_SOURCE=2 -march=armv8-a") + ``` + - 执行编译命令 + ```shell + bash build_ohos_tflite.sh arm64-v8a + ``` + - 在```example/deep_learning_framework/```目录下创建lib和output两个文件夹: + ```shell + mkdir lib output + ``` + - 进入```foundation/ai/neural_network_runtime/example/cmake_build```, 执行链接命令: + ```shell + make + ``` + - 北向demo成功编译完成后会在```deep_learning_framework/lib```生成libnnrt_delegate.so和libnnrt_implementation.so, 在```deep_learning_framework/output```下生成label_classify可执行文件,目录结构体如下所示。 + + ```text + deep_learning_framework + ├── lib + │   ├── libnnrt_delegate.so # 生成的TensorFlow Lite nnrt delegate库 + │   └── libnnrt_implementation.so # 生成的nnrt在TensorFlow Lite中接口实现库 + └── output + └── label_classify # 生成的可执行文件 + ``` + +4. 在开发板上运行北向demo + - 将步骤1生成的libnnrt_implementation.so, libnnrt_delegate.so和可执行文件label_classify, libneural_network_runtime.z.so, tensorflow-lite.so及其依赖的库, mobilenetv2.tflite模型, 标签labels.txt, 测试图片grace_hopper.bmp推送到开发板上: + ```shell + # 假设上述待推送文件均放在push_files/文件夹下 + hdc_std file send push_files/ /data/demo/ + ``` + - 进入开发板,执行demo前需要添加环境变量,文件执行权限等: + ```shell + # 进入开发板 + hdc_std shell + + # 进入推送文件目录,并增加可执行文件权限 + cd /data/demo + chmod +x ./label_classify + + # 添加环境变量 + export LD_LIBRARY_PATH=/data/demo:$LD_LIBRARY_PATH + + # 执行demo,-m tflite模型, -i 测试图片, -l 数据标签, -a 1表示使用nnrt, 0表示不使用nnrt推理,-z 1 表示打印输出张量大小的结果 + ./label_classify -m mobilenetv2.tflite -i grace_hopper.bmp -l labels.txt -a 1 -z 1 + ``` + +### 开发实例 +完整[Demo实例](xxx, Demo暂时还在黄区代码仓,超链接需等Demo开源后补充)可以参考社区实现。 diff --git a/example/deep_learning_framework/cmake_build/build_ohos_tflite.sh b/example/deep_learning_framework/cmake_build/build_ohos_tflite.sh new file mode 100644 index 0000000000000000000000000000000000000000..3dcc0ead0e81fcc6a85ef9eccae6630b7ea6f10d --- /dev/null +++ b/example/deep_learning_framework/cmake_build/build_ohos_tflite.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +set -e + +function help_info() { + echo "arm64-v8a(armeabi-v7a) means the CPU architecture is 64-bit(32-bit), the compile command like the following:" + echo "bash build_ohos_tflite.sh arm64-v8a" +} + +function build() { + echo "$1" + ./tool_chain/native/build-tools/cmake/bin/cmake \ + -DCMAKE_TOOLCHAIN_FILE=./tool_chain/native/build/cmake/ohos.toolchain.cmake \ + -DOHOS_ARCH=$1 \ + -DOHOS_PLATFORM=OHOS \ + -DCMAKE_BUILD_TYPE=RELEASE \ + -DBUILD_SHARED_LIBS=true \ + -DOHOS_STL=c++_static \ + -DCMAKE_BUILD_TYPE=Debug \ + .. +} + +if [ "$#" != 1 ]; then + echo "Incorrect command, please pass the correct number of parameters to the compile command." + help_info + exit 1; +fi + +if [ "$1" == "arm64-v8a" ]; then + build arm64-v8a +elif [ "$1" == "armeabi-v7a" ]; then + build armeabi-v7a +else + echo "Incorrect CPU architecture parameter or missing setting it, please pass the correct compile command." + help_info +fi + diff --git a/example/deep_learning_framework/tflite/CMakeLists.txt b/example/deep_learning_framework/tflite/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..e89cbbaa62c585053263341b70b2e4919e58bc5c --- /dev/null +++ b/example/deep_learning_framework/tflite/CMakeLists.txt @@ -0,0 +1,25 @@ +# +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +set(NNRT_INTERFACE_HOME ${LOCAL_DIRECTORY_PATH}/tflite/nnrt) +set(NNRT_DELEGATE_HOME ${LOCAL_DIRECTORY_PATH}/tflite/delegates/nnrt_delegate) +set(NNRT_DEMO_HOME ${LOCAL_DIRECTORY_PATH}/tflite/label_classify) +set(TFLITE_LIB_PATH ${LOCAL_DIRECTORY_PATH}/lib_3rd_nnrt_tflite) + +add_subdirectory(${NNRT_INTERFACE_HOME}) +add_subdirectory(${NNRT_DELEGATE_HOME}) +add_subdirectory(${NNRT_DEMO_HOME}) + + diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/CMakeLists.txt b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..ee5fa7f5f0972b3f4998bb186ff95486b725ab53 --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/CMakeLists.txt @@ -0,0 +1,34 @@ +# +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +set(TFLITE_PATH ${LOCAL_DIRECTORY_PATH}/lib_3rd_nnrt_tflite) + +LINK_DIRECTORIES(${TFLITE_PATH}/com/arm64-v8a/lib/) + +# Header path +set(OHOS_INC ${LOCAL_DIRECTORY_PATH}/../../interfaces/kits/c) +set(TOOLS_INC ${LOCAL_DIRECTORY_PATH}/tflite/tools) +set(TFLITE_INC ${LOCAL_DIRECTORY_PATH}/lib_3rd_nnrt_tflite/include) +include_directories(${LOCAL_DIRECTORY_PATH} ${NNRT_DELEGATE_HOME} ${TFLITE_INC} ${OHOS_INC} ${TOOLS_INC}) + +# Scr path +file(GLOB NNRT_DELEGATE_SRCS "${NNRT_DELEGATE_HOME}/*.cpp") + +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${LOCAL_DIRECTORY_PATH}/lib) + +add_library(nnrt_delegate SHARED ${NNRT_DELEGATE_SRCS}) +target_link_libraries(nnrt_delegate -ltensorflow-lite nnrt_implementation) + + diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate.cpp b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9528d7513740b759f75a429173305494adbff750 --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate.cpp @@ -0,0 +1,316 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnrt_delegate.h" + +#include "tensorflow/lite/util.h" +#include "tensorflow/lite/context_util.h" +#include "tensorflow/lite/minimal_logging.h" + +#include "nnrt_utils.h" +#include "nnrt_delegate_kernel.h" + +namespace tflite { +const char* g_tfliteNnrtDelegateName = "TfLiteNnrtDelegate"; +constexpr int32_t TFLITE_NNRT_DELEGATE_VERSION = 1; + +NnrtDelegate::Data::Data(const NnrtApi* nnrt) : nnrt(nnrt) {} + +NnrtDelegate::Data::~Data() {} + +void NnrtDelegate::NnrtDelegateConstructorImpl(const Options& options) +{ + m_delegateData.acceleratorName = options.acceleratorName; + m_delegateData.cacheDir = options.cacheDir; + m_delegateData.modelToken = options.modelToken; + m_delegateData.enableFp16 = options.enableFp16; + m_delegateData.executionPriority = options.executionPriority; + m_delegateData.executionPerformance = options.executionPerformance; + m_delegateData.allowDynamicDimensions = options.allowDynamicDimensions; + m_delegateData.maxNumberDelegatedPartitions = options.maxNumberDelegatedPartitions; + m_delegateData.maxCompilationTimeoutDurationNs = options.maxCompilationTimeoutDurationNs; + m_delegateData.maxExecutionTimeoutDurationNs = options.maxExecutionTimeoutDurationNs; + m_delegateData.maxExecutionLoopTimeoutDurationNs = options.maxExecutionLoopTimeoutDurationNs; + + Prepare = DoPrepare; + CopyFromBufferHandle = DoCopyFromBufferHandle; + CopyToBufferHandle = DoCopyToBufferHandle; + FreeBufferHandle = DoFreeBufferHandle; + data_ = &m_delegateData; + + // NNRT support dynamic shape feature. 
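+    // kTfLiteDelegateFlagsAllowDynamicTensors lets TFLite keep this delegate when tensor shapes are only known at run time;
+    // kTfLiteDelegateFlagsRequirePropagatedShapes asks TFLite to propagate updated shapes to the delegate kernels before invocation.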
+ flags |= kTfLiteDelegateFlagsAllowDynamicTensors; + flags |= kTfLiteDelegateFlagsRequirePropagatedShapes; +} + +NnrtDelegate::NnrtDelegate(const NnrtApi* nnrt) : NnrtDelegate(nnrt, Options()) {} + +NnrtDelegate::NnrtDelegate(const Options& options) : NnrtDelegate(NnrtImplementation(), options) {} + +NnrtDelegate::NnrtDelegate(const NnrtApi* nnrt, const Options& options) + : TfLiteDelegate(TfLiteDelegateCreate()), m_delegateData(nnrt) +{ + NnrtDelegateConstructorImpl(options); +} + +NnrtDelegate::NnrtDelegate() : NnrtDelegate(Options()) {} + +TfLiteStatus NnrtDelegate::GetOptions(const TfLiteDelegate* pDelegate, Options& options) +{ + // Caller guarantees that parameters are legal + auto pDelegateData = static_cast(pDelegate->data_); + options.acceleratorName = pDelegateData->acceleratorName; + options.cacheDir = pDelegateData->cacheDir; + options.modelToken = pDelegateData->modelToken; + options.enableFp16 = pDelegateData->enableFp16; + options.executionPriority = pDelegateData->executionPriority; + options.executionPerformance = pDelegateData->executionPerformance; + options.allowDynamicDimensions = pDelegateData->allowDynamicDimensions; + options.maxNumberDelegatedPartitions = pDelegateData->maxNumberDelegatedPartitions; + options.maxCompilationTimeoutDurationNs = pDelegateData->maxCompilationTimeoutDurationNs; + options.maxExecutionTimeoutDurationNs = pDelegateData->maxExecutionTimeoutDurationNs; + options.maxExecutionLoopTimeoutDurationNs = pDelegateData->maxExecutionLoopTimeoutDurationNs; + options.version = pDelegateData->version; + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegate::DoCopyFromBufferHandle(TfLiteContext* context, + TfLiteDelegate* delegate, TfLiteBufferHandle bufferHandle, TfLiteTensor* tensor) +{ + return kTfLiteError; +} + +TfLiteStatus NnrtDelegate::DoCopyToBufferHandle(TfLiteContext* context, + TfLiteDelegate* delegate, TfLiteBufferHandle bufferHandle, TfLiteTensor* tensor) +{ + return kTfLiteError; +} + +void NnrtDelegate::DoFreeBufferHandle(TfLiteContext* context, + TfLiteDelegate* delegate, TfLiteBufferHandle* handle) +{ + return; +} + +TfLiteStatus NnrtDelegate::LimitDelegatedPartitions(int32_t maxPartitions, + std::vector partitionParamsArray, std::vector& nodesToDelegate) +{ + int32_t numPartitions = partitionParamsArray.size(); + if ((maxPartitions <= 0) || (numPartitions <= maxPartitions)) { // no limit or not exceed limit + return kTfLiteOk; + } + + int32_t numberDelegatedPartitions = std::count_if( + partitionParamsArray.begin(), partitionParamsArray.end(), + [nodesToDelegate](const TfLiteDelegateParams& partitionParams) { + return std::find(nodesToDelegate.begin(), nodesToDelegate.end(), + partitionParams.nodes_to_replace->data[0]) != nodesToDelegate.end(); + }); + // Adapt maxPartitions to limit delegate paritions, sort and abandon the low-ranking nodes. 
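+    // Keep only the largest partitions: sort them by node count in descending order and
+    // rebuild nodesToDelegate from the first maxPartitions entries.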
+ if (numberDelegatedPartitions > maxPartitions) { + std::sort(partitionParamsArray.begin(), partitionParamsArray.end(), + [](const TfLiteDelegateParams& left, const TfLiteDelegateParams& right) -> bool { + return left.nodes_to_replace->size > right.nodes_to_replace->size; + }); + + nodesToDelegate.clear(); + + for (int32_t i = 0; i < maxPartitions; ++i) { + const TfLiteDelegateParams& partitionParams = partitionParamsArray[i]; + nodesToDelegate.insert(nodesToDelegate.end(), + partitionParams.nodes_to_replace->data, + partitionParams.nodes_to_replace->data + + partitionParams.nodes_to_replace->size); + } + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegate::GetSupportedNodes(TfLiteContext* context, + TfLiteDelegate* delegate, std::vector& supportedNodes) +{ + // Caller guarantees that parameters are legal + TfLiteIntArray* executionPlan = nullptr; + TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &executionPlan)); + TF_LITE_ENSURE_EQ(context, executionPlan != nullptr, true); + + // Check for every node if it is supported + TfLiteNode* node = nullptr; + TfLiteRegistration* registration = nullptr; + for (auto nodeIndex : TfLiteIntArrayView(executionPlan)) { + node = nullptr; + registration = nullptr; + TF_LITE_ENSURE_STATUS(context->GetNodeAndRegistration(context, nodeIndex, &node, ®istration)); + if (NnrtDelegateKernel::Validate(registration->builtin_code)) { + supportedNodes.emplace_back(nodeIndex); + } else { + TFLITE_LOG_PROD(TFLITE_LOG_WARNING, + "[NNRT-DELEGATE] Get unsupportted node: %d.", registration->builtin_code); + } + } + + return kTfLiteOk; +} + +void NnrtDelegate::GetDelegateKernelRegistration(TfLiteDelegate* delegate, TfLiteRegistration& nnrtDelegateKernel) +{ + // Caller guarantees that parameters are legal + nnrtDelegateKernel.profiling_string = nullptr; + nnrtDelegateKernel.builtin_code = kTfLiteBuiltinDelegate; + nnrtDelegateKernel.custom_name = g_tfliteNnrtDelegateName; + nnrtDelegateKernel.version = TFLITE_NNRT_DELEGATE_VERSION; + + nnrtDelegateKernel.init = [](TfLiteContext* context, const char* buffer, size_t length) -> void* { + if (buffer == nullptr) { + return nullptr; + } + + const TfLiteDelegateParams* params = reinterpret_cast(buffer); + auto* delegateData = static_cast(params->delegate->data_); + NnrtDelegateKernel* state = new (std::nothrow) NnrtDelegateKernel(delegateData->nnrt); + if (state == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to create NnrtDelegateKernel instance."); + return state; + } + + TfLiteStatus status = state->Init(context, params); + if (status != kTfLiteOk) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to init NnrtDelegateKernel."); + delete state; + state = nullptr; + } + return state; + }; + + nnrtDelegateKernel.free = [](TfLiteContext* context, void* buffer) -> void { + if (buffer != nullptr) { + delete static_cast(buffer); + buffer = nullptr; + } + }; + + nnrtDelegateKernel.prepare = [](TfLiteContext* context, TfLiteNode* node) -> TfLiteStatus { + if (node == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to prepare delegate kernels, the node is nullptr."); + return kTfLiteError; + } + + NnrtDelegateKernel* state = reinterpret_cast(node->user_data); + return state->Prepare(context, node); + }; + + nnrtDelegateKernel.invoke = [](TfLiteContext* context, TfLiteNode* node) -> TfLiteStatus { + if (node == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to invoke delegate kernels, the node is nullptr."); + return kTfLiteError; + } + + NnrtDelegateKernel* state = reinterpret_cast(node->user_data); + 
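+            // node->user_data holds the NnrtDelegateKernel created by the init callback above; forward the call to it.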
return state->Invoke(context, node); + }; +} + +TfLiteStatus NnrtDelegate::CheckDeviceValid(TfLiteContext* context, TfLiteDelegate* delegate) +{ + // Caller guarantees that parameters are legal + auto* delegateData = static_cast(delegate->data_); + if (delegateData == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Delegate data not be found."); + return kTfLiteDelegateDataNotFound; + } + + const NnrtApi* nnrt = delegateData->nnrt; + if (nnrt == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to get nnrt instance."); + return kTfLiteError; + } + + NnrtDelegate::Options delegateOptions; + TF_LITE_ENSURE_STATUS(NnrtDelegate::GetOptions(delegate, delegateOptions)); + + if (tflite::IsUseTargetDevice(delegateOptions)) { + size_t nnrtDevice; + TF_LITE_ENSURE_STATUS(GetTargetDevice(context, delegate, nnrt, nnrtDevice)); + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegate::DoPrepare(TfLiteContext* context, TfLiteDelegate* delegate) +{ + if ((context == nullptr) || (delegate == nullptr)) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-DELEGATE] Input TFLite-Context or TFLite-Delegate is nullptr."); + return kTfLiteError; + } + + auto* delegateData = static_cast(delegate->data_); + const NnrtApi* nnrt = delegateData->nnrt; + + // Do not delegate nodes_ if NN API is unavailable. + if (!nnrt->nnrtExists) { + return kTfLiteOk; + } + + // Check devices validity + TF_LITE_ENSURE_STATUS(CheckDeviceValid(context, delegate)); + + // Get supportted nodes by tflite. + // We don't care about all nodes_, we only care about ones in the current plan. + std::vector supportedNodes; + GetSupportedNodes(context, delegate, supportedNodes); + + // If there are no delegated nodes, short-circuit node replacement. + if (supportedNodes.empty()) { + TFLITE_LOG_PROD(TFLITE_LOG_INFO, "[NNRT-DELEGATE] supportted node list is empty."); + return kTfLiteOk; + } + + static TfLiteRegistration nnrtDelegateKernel; + GetDelegateKernelRegistration(delegate, nnrtDelegateKernel); + + std::vector nodesToDelegate(supportedNodes); + int32_t numPartitions; + TfLiteDelegateParams* paramsArray = nullptr; + auto supportedNodesArray = BuildTfLiteIntArray(supportedNodes); + TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning( + context, supportedNodesArray.get(), ¶msArray, &numPartitions)); + + NnrtDelegate::Options delegateOptions; + TF_LITE_ENSURE_STATUS(NnrtDelegate::GetOptions(delegate, delegateOptions)); + const auto partitionParamsArray = std::vector(paramsArray, paramsArray + numPartitions); + TF_LITE_ENSURE_STATUS(LimitDelegatedPartitions( + delegateOptions.maxNumberDelegatedPartitions, partitionParamsArray, nodesToDelegate)); + + auto nodesToDelegateArray = BuildTfLiteIntArray(nodesToDelegate); + if (nodesToDelegateArray->size == 0) { + TFLITE_LOG_PROD(TFLITE_LOG_INFO, "[NNRT-DELEGATE] No node to delegate."); + return kTfLiteOk; + } else { + // Request TFLite to partition the graph and make kernels + // for each independent node sub set a new nnrtDelegateKernel. + return context->ReplaceNodeSubsetsWithDelegateKernels(context, + nnrtDelegateKernel, nodesToDelegateArray.get(), delegate); + } +} + +// Return a singleton NNRT Delegate that can check ops supported. 
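+// The instance is default-constructed (i.e. with default Options) on first use and shared for the lifetime of the process.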
+TfLiteDelegate* NnrtDelegateSingleton() +{ + static NnrtDelegate delegate; + return &delegate; +} +} // namespace tflite \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate.h b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate.h new file mode 100644 index 0000000000000000000000000000000000000000..681e0699d5d53ff8781538de7ce9cf03525dba2f --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_LITE_DELEGATES_NNRT_DELEGATE_H +#define TENSORFLOW_LITE_DELEGATES_NNRT_DELEGATE_H + +#include +#include + +#include "neural_network_runtime.h" + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/delegates/serialization.h" + +#include "../nnrt/nnrt_implementation.h" + +namespace tflite { +namespace delegate { +namespace nnrt { +class NnrtDelegateKernel; +} // namespace nnrt +} // namespace delegate + +using tflite::delegate::nnrt::NnrtDelegateKernel; + +// TFliteDelegate to interface with NNRT. +class NnrtDelegate : public TfLiteDelegate { +public: + struct Options { + OH_NN_PerformanceMode executionPerformance = OH_NN_PERFORMANCE_NONE; + std::string acceleratorName; + std::string cacheDir; + std::string modelToken; + OH_NN_Priority executionPriority = OH_NN_PRIORITY_MEDIUM; + int32_t maxNumberDelegatedPartitions = -1; + uint64_t maxCompilationTimeoutDurationNs = 0; + uint64_t maxExecutionTimeoutDurationNs = 0; + uint64_t maxExecutionLoopTimeoutDurationNs = 0; + // allow fp32 compuation to be run in fp16. + bool enableFp16 = false; + bool allowDynamicDimensions = false; + + uint32_t version {0}; + }; + + // Uses default options. + NnrtDelegate(); + + // The ownership of the NNRT instance is left to the caller of the + // NnrtDelegate constructor; the caller must ensure that the lifetime + // of the NNRT instance exceeds the lifetime of the NnrtDelegate. + explicit NnrtDelegate(const NnrtApi* nnrt); + + // The constructor that accepts options from user. + // This makes a copy of any data that it needs from Options, so + // the caller can safely deallocate any storage pointed to by + // the 'const char *' members of Options immediately after calling this. + explicit NnrtDelegate(const Options& options); + + // Constructor that accepts both an NNRT instance and options. + // The ownership of the NNRT instance is left to the caller of the + // NnrtDelegate constructor; the caller must ensure that the lifetime + // of the NNRT instance exceeds the lifetime of the NnrtDelegate. + // This constructor makes a copy of any data that it needs from Options, so + // the caller can safely deallocate any storage pointed to by + // the 'const char *' members of Options immediately after calling this. 
+ NnrtDelegate(const NnrtApi* nnrt, const Options& options); + + ~NnrtDelegate() = default; + + // Returns the delegate options. + // The lifetime of the storage pointed to by the 'const char *' members of the + // returned Options object is the same as the lifetime of the supplied + // TfLiteDelegate instance. + static TfLiteStatus GetOptions(const TfLiteDelegate* pDelegate, Options& options); + +private: + struct Data { + const NnrtApi* nnrt = nullptr; + + // Preferred Power/perf trade-off. + OH_NN_PerformanceMode executionPerformance = OH_NN_PERFORMANCE_NONE; + + // Selected NNRT accelerator name. + std::string acceleratorName; + + // The cache dir for NNRT model. + std::string cacheDir; + + // The unique token string for NNRT model. + std::string modelToken; + + // Maximum number of NNRT partition to delegate. Zero or negative means + // no limit. + int32_t maxNumberDelegatedPartitions = -1; + + // Specifies the relative priority for executions of the model. + OH_NN_Priority executionPriority = OH_NN_PRIORITY_MEDIUM; + + // Specifies the maximum expected duration in nanosecond for compiling the + // model. + uint64_t maxCompilationTimeoutDurationNs = 0; + + // Specifies the maximum expected duration in nanosecond for executing the + // model. + uint64_t maxExecutionTimeoutDurationNs = 0; + + // Specifies the maximum expected duration in nanosecond for WHILE loops in + // the execution + uint64_t maxExecutionLoopTimeoutDurationNs = 0; + + // allow fp32 compuation to be run in fp16. + bool enableFp16 = false; + + // Whether to allow dynamic dimension sizes without re-compilation. + bool allowDynamicDimensions = false; + + uint32_t version {0}; + + explicit Data(const NnrtApi* nnrt); + ~Data(); + }; + + static TfLiteStatus DoPrepare(TfLiteContext* context, TfLiteDelegate* delegate); + + static TfLiteStatus DoCopyFromBufferHandle(TfLiteContext* context, + TfLiteDelegate* delegate, TfLiteBufferHandle bufferHandle, TfLiteTensor* tensor); + + static TfLiteStatus DoCopyToBufferHandle(TfLiteContext* context, + TfLiteDelegate* delegate, TfLiteBufferHandle bufferHandle, TfLiteTensor* tensor); + + static void DoFreeBufferHandle(TfLiteContext* context, + TfLiteDelegate* delegate, TfLiteBufferHandle* handle); + + static TfLiteStatus LimitDelegatedPartitions(int32_t maxPartitions, + std::vector partitionParamsArray, std::vector& nodesToDelegate); + + static TfLiteStatus GetSupportedNodes(TfLiteContext* context, + TfLiteDelegate* delegate, std::vector& supportedNodes); + + static void GetDelegateKernelRegistration(TfLiteDelegate* delegate, TfLiteRegistration& nnrtDelegateKernel); + + static TfLiteStatus CheckDeviceValid(TfLiteContext* context, TfLiteDelegate* delegate); + + void NnrtDelegateConstructorImpl(const Options& options); + +private: + // Delegate data presented through TfLiteDelegate::data_. + Data m_delegateData; +}; + +TfLiteDelegate* NnrtDelegateSingleton(); +} // namespace tflite + +#endif // TENSORFLOW_LITE_DELEGATES_NNRT_DELEGATE_H diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_kernel.cpp b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..05933aeea2daba081ed6a9af0266a3967d32abc3 --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_kernel.cpp @@ -0,0 +1,420 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnrt_delegate_kernel.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tensorflow/lite/context_util.h" +#include "neural_network_runtime.h" + +namespace tflite { +namespace delegate { +namespace nnrt { +constexpr int32_t SCALAR_RANK = 1; + +#define RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_COMPILE(code, callDesc) \ + do { \ + if ( (code) != OH_NN_SUCCESS) { \ + const auto errorDesc = NnrtErrorDescription((code)); \ + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "NN API returned error %s at line %d while %s.\n", errorDesc.c_str(), \ + __LINE__, (callDesc)); \ + m_nnrt->OH_NNCompilation_Destroy(&m_pNnCompilation); \ + return kTfLiteError; \ + } \ + } while (0) + +bool NnrtDelegateKernel::Validate(const int32_t builtinCode) +{ + if (TFLITE_TYPE_TO_NNRT_TYPE.count(builtinCode) && + TFLITE_TYPE_TO_NNRT_TYPE.at(builtinCode) != OH_NN_UNSUPPORT_OPS) { + return true; + } + + return false; +} + +TfLiteStatus NnrtDelegateKernel::Init(TfLiteContext* context, const TfLiteDelegateParams* params) +{ + TF_LITE_ENSURE_EQ(context, params != nullptr, true); + + if (m_initialised) { + TFLITE_LOG_PROD(TFLITE_LOG_INFO, + "[NNRT-DELEGATE_KERNEL] NnrtDelegateKernel has completed initialization, no need init again."); + return kTfLiteOk; + } + + for (auto nodeIndex : TfLiteIntArrayView(params->nodes_to_replace)) { + m_delegateNodes.emplace_back(nodeIndex); + } + + NnrtDelegate::Options delegateOptions; + TF_LITE_ENSURE_STATUS(NnrtDelegate::GetOptions(params->delegate, delegateOptions)); + TF_LITE_ENSURE_STATUS(tflite::GetTargetDevice(context, params->delegate, m_nnrt, m_nnrtDevice)); + if (m_nnModel == nullptr) { + m_nnModel = m_nnrt->OH_NNModel_Construct(); + if (m_nnModel == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-DELEGATE_KERNEL] Fail to create ONNRT model."); + return kTfLiteError; + } + TF_LITE_ENSURE_STATUS(BuildGraph(context, delegateOptions, params->input_tensors, params->output_tensors)); + } + + m_initialised = true; + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::Prepare(TfLiteContext* context, TfLiteNode* node) +{ + TF_LITE_ENSURE_EQ(context, node != nullptr, true); + + if (!m_initialised) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-DELEGATE_KERNEL] NnrtDelegateKernel Prepare failed, not Init yet."); + return kTfLiteError; + } + + if (m_compiled) { + return kTfLiteOk; // If model has completed compilation, no need compile again. 
+ } + + // Create OH_NNCompilation + m_pNnCompilation = m_nnrt->OH_NNCompilation_Construct(m_nnModel); + if (m_pNnCompilation == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-DELEGATE_KERNEL] Fail to create OH_NNCompilation instance."); + return kTfLiteError; + } + + NnrtDelegate::Options delegateOptions; + TF_LITE_ENSURE_STATUS(NnrtDelegate::GetOptions(node->delegate, delegateOptions)); + + TF_LITE_ENSURE_STATUS(SetNnOptions(context, delegateOptions)); + RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_COMPILE(m_nnrt->OH_NNCompilation_Build(m_pNnCompilation), + "completing NNRT compilation"); + + m_compiled = true; + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::Invoke(TfLiteContext* context, TfLiteNode* node) +{ + if (!m_compiled) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-DELEGATE_KERNEL] NnrtDelegateKernel Invoke failed, not compile yet."); + return kTfLiteError; + } + + // Create OH_NNExecutor_Construct + OH_NNExecutor* pNnExecution {nullptr}; + pNnExecution = m_nnrt->OH_NNExecutor_Construct(m_pNnCompilation); + if (pNnExecution == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-DELEGATE_KERNEL] Fail to create OH_NNExecutor instance."); + return kTfLiteError; + } + + // Set the input tensor buffers. + OH_NN_Tensor inputNnTensor; + TF_LITE_ENSURE_STATUS(SetInputTensors(context, node, pNnExecution, inputNnTensor)); + + // Get the output tensor buffers. + TF_LITE_ENSURE_STATUS(SetOutputTensors(context, node, pNnExecution)); + + // Invoke delegated subgraph. + RETURN_TFLITE_ERROR_IF_NN_ERROR(m_nnrt->OH_NNExecutor_Run(pNnExecution), "running computation"); + + m_nnrt->OH_NNExecutor_Destroy(&pNnExecution); + pNnExecution = nullptr; + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::Map(const int32_t builtinCode, const NnrtOpMappingArgs& mappingArgs, + int32_t& nnOpType) const +{ + if (TFLITE_TYPE_TO_NNRT_TYPE.count(builtinCode) == 0) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-DELEGATE_KERNEL] Not support current TF-Lite Operator, builtCode: %d.", builtinCode); + return kTfLiteError; + } + + TfLiteStatus retValue = mappingArgs.builder->AddOpFuncParams(mappingArgs, builtinCode); + if (retValue != kTfLiteOk) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-DELEGATE_KERNEL] Failed to add params to these operations."); + return retValue; + } + nnOpType = TFLITE_TYPE_TO_NNRT_TYPE.at(builtinCode); + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::BuildGraph(TfLiteContext* context, const NnrtDelegate::Options& delegateOptions, + const TfLiteIntArray* inputTensors, const TfLiteIntArray* outputTensors) +{ + if (context == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-DELEGATE_KERNEL] The context is nullptr when building the graph."); + return kTfLiteError; + } + + TF_LITE_ENSURE_EQ(context, inputTensors != nullptr, true); + TF_LITE_ENSURE_EQ(context, outputTensors != nullptr, true); + + // Build the ops and tensors. + TF_LITE_ENSURE_STATUS(AddOpsAndTensors(context, inputTensors, delegateOptions)); + // Map input and output tensor indices to NN + // Make the TensorFlow Lite inputs and outputs to nn_indices. + OH_NN_UInt32Array inputIndices; + OH_NN_UInt32Array outputIndices; + std::vector inputsData; + for (auto i : TfLiteIntArrayView(inputTensors)) { + // Constant tensors are not NNRT inputs. 
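+        // Only tensors whose values are supplied at execution time become NNRT model inputs;
+        // optional tensors, kTfLiteMmapRo constants and tensors never mapped by the delegate are skipped.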
+ if ((i != kTfLiteOptionalTensor) && (context->tensors[i].allocation_type != kTfLiteMmapRo) && + // The delegate might not have mapped this input (this can + // happen if one tensor is split in several ones) + (m_tensorMapping.LiteIndexToNn(i) != INVALID_INDEX)) { + const int32_t inputTensorNnIndex = m_tensorMapping.LiteIndexToNn(i); + inputsData.emplace_back(inputTensorNnIndex); + } + } + + std::vector outputsData; + for (auto i : TfLiteIntArrayView(outputTensors)) { + const int32_t outputTensorNnIndex = m_tensorMapping.LiteIndexToNn(i); + // Unmapped outputs are not added + if (outputTensorNnIndex != INVALID_INDEX) { + outputsData.emplace_back(outputTensorNnIndex); + } + } + + inputIndices.data = inputsData.data(); + outputIndices.data = outputsData.data(); + inputIndices.size = inputsData.size(); + outputIndices.size = outputsData.size(); + // Tell NN to declare inputs/outputs + RETURN_TFLITE_ERROR_IF_NN_ERROR(m_nnrt->OH_NNModel_SpecifyInputsAndOutputs(m_nnModel, &inputIndices, + &outputIndices), "identifying model inputs and outputs"); + + RETURN_TFLITE_ERROR_IF_NN_ERROR(m_nnrt->OH_NNModel_Finish(m_nnModel), "finalizing the model"); + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::AddOpsAndTensors(TfLiteContext* context, const TfLiteIntArray* inputTensors, + const NnrtDelegate::Options& delegateOptions) +{ + // The tensor builder allows creating a single op. It is created outside + // the for loop to avoid reallocating the vectors. + NnrtOpBuilderArgs opBuilderArgs = { + .context = context, + .nnModel = m_nnModel, + .inputTensors = const_cast(inputTensors), + .pTensorMapping = &m_tensorMapping, + .delegateOptions = delegateOptions + }; + NnrtOpBuilder builder(m_nnrt, opBuilderArgs); + + // Clear the input and output lists. + builder.ClearInputOuputLists(); + + // Add other tensors. + TfLiteNode* node = nullptr; + TfLiteRegistration* reg = nullptr; + for (int32_t nodeIndex : m_delegateNodes) { + node = nullptr; + reg = nullptr; + TF_LITE_ENSURE_STATUS( + context->GetNodeAndRegistration(context, nodeIndex, &node, ®)); // Obtain the op and registration. + if ((node == nullptr) || (reg == nullptr)) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-DELEGATE_KERNEL] Get node and registration failed."); + return kTfLiteError; + } + + const bool scalarAsTensor = IsScalarInputSupported(reg->builtin_code); + int32_t inputTensorFlags = 0; + if (scalarAsTensor) { + inputTensorFlags |= NN_TENSOR_FLAG_SCALAR_AS_TENSOR; + } + + // Get op type and tensors, fails if the Validate function failed. + int32_t nnOpType; + NnrtOpMappingArgs opMappingArgs = { context, &builder, node, nodeIndex }; + TF_LITE_ENSURE_STATUS(Map(reg->builtin_code, opMappingArgs, nnOpType)); + + for (int32_t inputPos = 0; inputPos < node->inputs->size; ++inputPos) { + if ((reg->builtin_code == kTfLiteBuiltinFullyConnected) && + (node->inputs->data[inputPos] == kTfLiteOptionalTensor)) { + continue; // skip optional bias and handle it during mapping. + } + const auto inputIndex = node->inputs->data[inputPos]; + TF_LITE_ENSURE_STATUS(builder.AddTensorInput(inputIndex, reg->builtin_code, inputTensorFlags)); + } + // Map outputs to NN API tensor indices. 
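+        // Register every output of the node with the op builder so it can be bound to an NNRT operand index.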
+ int32_t outputTensorFlags = 0; + for (int32_t outputPos = 0; outputPos < node->outputs->size; ++outputPos) { + auto outputIndex = node->outputs->data[outputPos]; + TF_LITE_ENSURE_STATUS(builder.AddTensorOutput(outputIndex, reg->builtin_code, outputTensorFlags)); + } + TF_LITE_ENSURE_STATUS(builder.FinalizeAddOperation(static_cast(nnOpType), nodeIndex)); + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::ConvertTensorTypeToNn(TfLiteContext* context, + const std::pair& indexPair, OH_NN_QuantParam* nnQuantParam, OH_NN_Tensor& nnTensor) +{ + TF_LITE_ENSURE_EQ(context, context->tensors_size > indexPair.first, true); + TfLiteTensor* tensor = &(context->tensors[indexPair.first]); + TF_LITE_ENSURE_EQ(context, tensor != nullptr, true); + + OH_NN_DataType nnType {OH_NN_UNKNOWN}; + TF_LITE_ENSURE_STATUS(m_tensorMapping.ConvertType(context, indexPair.first, 0, nnType)); + + uint32_t tensorRank = static_cast(tensor->dims->size); + int32_t* tensorDims = reinterpret_cast(tensor->dims->data); + if (tensorDims == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-DELEGATE_KERNEL] The tensorDims is nullptr when converting the type of tensors to nnrt."); + return kTfLiteError; + } + + // treat scalar input as single cell tensor in NNRT. + if (tensorRank == 0) { + tensorRank = SCALAR_RANK; + *tensorDims = SCALAR_RANK; + } + + nnTensor.dataType = nnType; + nnTensor.dimensionCount = tensorRank; + nnTensor.dimensions = tensorDims; + nnTensor.quantParam = nnQuantParam; + nnTensor.type = OH_NN_TENSOR; + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::SetInputTensors(TfLiteContext* context, TfLiteNode* node, + OH_NNExecutor* pNnExecution, OH_NN_Tensor& nnTensor) +{ + TF_LITE_ENSURE_EQ(context, node != nullptr, true); + TF_LITE_ENSURE_EQ(context, pNnExecution != nullptr, true); + + // Note: we access tflite tensors using + // absolute indices but NN api indices inputs by relative indices. + int32_t relativeIndex = 0; + OH_NN_QuantParam* nnQuantParam = nullptr; + TfLiteIntArray* tensors = node->inputs; + TF_LITE_ENSURE_EQ(context, tensors != nullptr, true); + + for (auto absoluteIndex : TfLiteIntArrayView(tensors)) { + if (absoluteIndex == kTfLiteOptionalTensor) { + continue; + } + + std::pair indexPair = std::make_pair(absoluteIndex, relativeIndex); + ConvertTensorTypeToNn(context, indexPair, nnQuantParam, nnTensor); + + TfLiteTensor* tensor = &context->tensors[absoluteIndex]; + TF_LITE_ENSURE_EQ(context, tensor != nullptr, true); + + if (tensor->allocation_type != kTfLiteMmapRo) { + RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(m_nnrt->OH_NNExecutor_SetInput(pNnExecution, relativeIndex, + &nnTensor, tensor->data.raw, tensor->bytes), + "associating NNRT execution output with a memory object", tensor); + ++relativeIndex; + } else { + continue; + } + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::SetOutputTensors(TfLiteContext* context, TfLiteNode* node, + OH_NNExecutor* pNnExecution) +{ + TF_LITE_ENSURE_EQ(context, node != nullptr, true); + TF_LITE_ENSURE_EQ(context, pNnExecution != nullptr, true); + + // Note: we access tflite tensors using + // absolute indices but NN api indices inputs by relative indices. 
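+    // relativeIndex counts only the outputs that are actually mapped to NNRT; unmapped outputs are skipped below.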
+ int32_t relativeIndex = 0; + TfLiteIntArray* tensors = node->outputs; + TF_LITE_ENSURE_EQ(context, tensors != nullptr, true); + for (auto absoluteIndex : TfLiteIntArrayView(tensors)) { + if (m_tensorMapping.LiteIndexToNn(absoluteIndex) == INVALID_INDEX) { + continue; + } + + TfLiteTensor* tensor = &context->tensors[absoluteIndex]; + TF_LITE_ENSURE_EQ(context, tensor != nullptr, true); + RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR( + m_nnrt->OH_NNExecutor_SetOutput(pNnExecution, relativeIndex, tensor->data.raw, tensor->bytes), + "associating NNRT execution output to a memory object", tensor); + ++relativeIndex; + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::SetNnOptions(TfLiteContext* context, const NnrtDelegate::Options& delegateOptions) +{ + if (context == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-DELEGATE_KERNEL] The context is nullptr when setting nnrt options."); + return kTfLiteError; + } + + RETURN_TFLITE_ERROR_IF_NN_ERROR(m_nnrt->OH_NNCompilation_SetDevice(m_pNnCompilation, m_nnrtDevice), + "creating NNRT compilation"); + + auto performance = delegateOptions.executionPerformance; + if (performance != OH_NN_PERFORMANCE_NONE) { + RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_COMPILE( + m_nnrt->OH_NNCompilation_SetPerformanceMode(m_pNnCompilation, performance), + "setting compilation performance"); + } + + // Set cacahe, if cacheDir & modelToken & device is valid. + std::string cacheDir = delegateOptions.cacheDir; + std::string modelToken = delegateOptions.modelToken; + uint32_t version = delegateOptions.version; + if (!cacheDir.empty() && (!IsUseTargetDevice(delegateOptions) || + (delegateOptions.acceleratorName == NNRT_REFERENCE_DEVICE))) { + RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_COMPILE( + m_nnrt->OH_NNCompilation_SetCache(m_pNnCompilation, cacheDir.c_str(), version), + "setting compilation cache"); + } else if (cacheDir.empty()) { + TFLITE_LOG_PROD(TFLITE_LOG_WARNING, "The cacheDir is empty, will not load or save cache."); + } + return kTfLiteOk; +} +} // namespace nnrt +} // namespace delegate +} // tflite \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_kernel.h b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..997dc7f23decdcbe2840c93e6f7d5c1d97996c34 --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_kernel.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_LITE_DELEGATES_NNRT_DELEGATE_KERNEL_H +#define TENSORFLOW_LITE_DELEGATES_NNRT_DELEGATE_KERNEL_H + + +#include "neural_network_runtime.h" +#include "tensorflow/lite/c/common.h" + +#include "tensor_mapping.h" +#include "nnrt_op_builder.h" + +namespace tflite { +namespace delegate { +namespace nnrt { + +// Represents a subgraph in TFLite that will be delegated to NNRt. 
+// It is abstracted as a single kernel node in the main TFLite graph and +// implements Init/Prepare/Invoke as TFLite kernel nodes. +class NnrtDelegateKernel { +public: + explicit NnrtDelegateKernel(const NnrtApi* nnrt) + : m_initialised(false), + m_compiled(false), + m_nnrt(nnrt), + m_nnModel(nullptr), + m_pNnCompilation(nullptr) {} + + NnrtDelegateKernel() : NnrtDelegateKernel(NnrtImplementation()) {} + virtual ~NnrtDelegateKernel() + { + m_nnrt->OH_NNModel_Destroy(&m_nnModel); + m_nnrt->OH_NNCompilation_Destroy(&m_pNnCompilation); + m_nnrt = nullptr; + } + + // Returns true if the node can be accelerated with NNRT. + static bool Validate(const int32_t builtinCode); + + // Initialize the kernel (a NN model) and builds the NN Model. + TfLiteStatus Init(TfLiteContext* context, const TfLiteDelegateParams* params); + + // Creates the NNRT Compilation for the NN model. It assumes that Init has + // been called and completed successfully. + TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node); + + // Invoke the NN Model. Expects Init and Prepare to have been completed successfully. + TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node); + +private: + TfLiteStatus Map(int32_t builtinCode, const NnrtOpMappingArgs& mappingArgs, int32_t& nnOpType) const; + TfLiteStatus AddOpsAndTensors(TfLiteContext* context, const TfLiteIntArray* inputTensors, + const NnrtDelegate::Options& delegateOptions); + TfLiteStatus BuildGraph(TfLiteContext* context, const NnrtDelegate::Options& options, + const TfLiteIntArray* inputTensors, const TfLiteIntArray* outputTensors); + TfLiteStatus ConvertTensorTypeToNn(TfLiteContext* context, const std::pair& indexPair, + OH_NN_QuantParam* nnQuantParam, OH_NN_Tensor& nnTensor); + TfLiteStatus SetInputTensors(TfLiteContext* context, TfLiteNode* node, OH_NNExecutor* pNnExecution, + OH_NN_Tensor& nnTensor); + TfLiteStatus SetOutputTensors(TfLiteContext* context, TfLiteNode* node, OH_NNExecutor* pNnExecution); + TfLiteStatus SetNnOptions(TfLiteContext* context, const NnrtDelegate::Options& delegateOptions); + +private: + // True if initialization has been completed successfully + bool m_initialised; + + // True if compilation has been completed successfully + bool m_compiled; + + // NN device handle. + size_t m_nnrtDevice; + + // Access to NNRT. + const NnrtApi* m_nnrt; + + // NN API state. + OH_NNModel* m_nnModel; + OH_NNCompilation* m_pNnCompilation; + + // Node indices that this delegate is responsible for. Indices here + // indexes into the nodes array in the TfLiteContext. + std::vector m_delegateNodes; + + // Track indices we use + TensorMapping m_tensorMapping; +}; +} // namespace nnrt +} // namespace delegate +} // namespace tflite + +#endif // TENSORFLOW_LITE_DELEGATES_NNRT_DELEGATE_KERNEL_H diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_provider.cpp b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_provider.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e7592a374a503e43b60e4be981afdd0f0cb7339c --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_provider.cpp @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "tensorflow/lite/tools/delegates/delegate_provider.h" + +#include "nnrt_delegate.h" +#include "../nnrt/nnrt_implementation.h" + +namespace tflite { +namespace tools { +constexpr int32_t DEFAULT_THREADS = 1; +constexpr int32_t DEFAULT_DELEGATE_NUM = -1; +class NnrtDelegateProvider : public DelegateProvider { +public: + NnrtDelegateProvider() + { + default_params_.AddParam("use_nnrt", ToolParam::Create(false)); + default_params_.AddParam("performance", ToolParam::Create("")); + default_params_.AddParam("priority", ToolParam::Create("")); + default_params_.AddParam("device", ToolParam::Create("")); + default_params_.AddParam("cache_dir", ToolParam::Create("")); + default_params_.AddParam("model_token", ToolParam::Create("")); + default_params_.AddParam("max_delegate_num", ToolParam::Create(DEFAULT_DELEGATE_NUM)); + default_params_.AddParam("enable_fp16", ToolParam::Create(false)); + default_params_.AddParam("allow_dynamic_dimensions", ToolParam::Create(false)); + } + + ~NnrtDelegateProvider() {}; + + std::vector CreateFlags(ToolParams* param) const final; + + void LogParams(const ToolParams& params, bool verbose) const final; + + TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final; + + std::pair CreateRankedTfLiteDelegate(const ToolParams& params) const final; + + std::string GetName() const final + { + return "NNRT"; + } +}; + +REGISTER_DELEGATE_PROVIDER(NnrtDelegateProvider); + +std::vector NnrtDelegateProvider::CreateFlags(ToolParams* params) const +{ + std::vector flags = { + CreateFlag("max_delegate_num", params, "Delegate max num limit, max_delegate_num <= 0 means no limit"), + CreateFlag("enable_fp16", params, "Whether to Infer model with FP16."), + CreateFlag("allow_dynamic_dimensions", params, + "Whether to allow dynamic dimension sizes without re-compilation."), + CreateFlag("performance", params, + "Execution performance for nnrt delegate. " + "choose within [low, medium, high, extreme, default]."), + CreateFlag("priority", params, + "The model execution priority in nnrt, and it " + "choose within [default, low, medium, high]."), + CreateFlag("device", params, + "The name of the nnrt accelerator to use, " + "choose within [cpu, gpu, apu, nnrt-reference], " + "nnrt-reference means chosen automatically by nnrt."), + CreateFlag("cache_dir", params, "The directory of load and save cache for delegate"), + CreateFlag("model_token", params, "The file_name of load and save cache for delegate"), + }; + return flags; +} + +void NnrtDelegateProvider::LogParams(const ToolParams& params, bool verbose) const +{ + LOG_TOOL_PARAM(params, bool, "use_nnrt", "Use NNRT", verbose); + if (!params.Get("use_nnrt")) { + return; // no use nnrt, return. 
+ } + + LOG_TOOL_PARAM(params, std::string, "performance", "NNRT execution performance", verbose); + LOG_TOOL_PARAM(params, std::string, "priority", "NNRT execution priority", verbose); + LOG_TOOL_PARAM(params, std::string, "device", "NNRT accelerator name", verbose); + LOG_TOOL_PARAM(params, std::string, "cache_dir", "NNRT model cache directory", verbose); + LOG_TOOL_PARAM(params, std::string, "model_token", "NNRT model cache filename", verbose); + LOG_TOOL_PARAM(params, int32_t, "max_delegate_num", "NNRT delegate max partition", verbose); + LOG_TOOL_PARAM(params, bool, "enable_fp16", "NNRT allow fp16 inference", verbose); + LOG_TOOL_PARAM(params, bool, "allow_dynamic_dimensions", "NNRT allow dynamic dimensions", verbose); +} + +TfLiteStatus GetExecutionPerformance(const ToolParams& params, NnrtDelegate::Options& options) +{ + std::string stringExecutionPerformance = params.Get("performance"); + if (stringExecutionPerformance.empty()) { + return kTfLiteOk; // no set performance + } + + OH_NN_PerformanceMode executionPerformance = OH_NN_PERFORMANCE_NONE; + if (stringExecutionPerformance == "low") { + executionPerformance = OH_NN_PERFORMANCE_LOW; + } else if (stringExecutionPerformance == "medium") { + executionPerformance = OH_NN_PERFORMANCE_MEDIUM; + } else if (stringExecutionPerformance == "high") { + executionPerformance = OH_NN_PERFORMANCE_HIGH; + } else if (stringExecutionPerformance == "extreme") { + executionPerformance = OH_NN_PERFORMANCE_EXTREME; + } else if (stringExecutionPerformance == "default") { + executionPerformance = OH_NN_PERFORMANCE_NONE; + } else { + TFLITE_LOG(ERROR) << "The provided value is not a valid nnrt execution performance."; + return kTfLiteError; + } + options.executionPerformance = executionPerformance; + + return kTfLiteOk; +} + +TfLiteStatus GetExecutionPriority(const ToolParams& params, NnrtDelegate::Options& options) +{ + std::string stringExecutionPriority = params.Get("priority"); + if (stringExecutionPriority.empty()) { + return kTfLiteOk; // no set priority + } + + OH_NN_Priority executionPriority = OH_NN_PRIORITY_MEDIUM; + if (stringExecutionPriority == "low") { + executionPriority = OH_NN_PRIORITY_LOW; + } else if (stringExecutionPriority == "medium") { + executionPriority = OH_NN_PRIORITY_MEDIUM; + } else if (stringExecutionPriority == "high") { + executionPriority = OH_NN_PRIORITY_HIGH; + } else if (stringExecutionPriority == "default") { + executionPriority = OH_NN_PRIORITY_MEDIUM; + } else { + TFLITE_LOG(ERROR) << "The provided value is not a valid nnrt execution priority."; + return kTfLiteError; + } + options.executionPriority = executionPriority; + + return kTfLiteOk; +} + +TfLiteStatus MapParams(const ToolParams& params, NnrtDelegate::Options& options) +{ + std::string acceleratorName = params.Get("device"); + if (!acceleratorName.empty()) { + options.acceleratorName = acceleratorName; + } + + if (params.GetParam("max_delegate_num") != nullptr) { + options.maxNumberDelegatedPartitions = params.Get("max_delegate_num"); + } + + std::string cacheDir = params.Get("cache_dir"); + if (!cacheDir.empty()) { + options.cacheDir = cacheDir; + } + + std::string modelToken = params.Get("model_token"); + if (!modelToken.empty()) { + options.modelToken = modelToken; + } + + if (params.Get("enable_fp16")) { + options.enableFp16 = true; + } + + if (params.Get("allow_dynamic_dimensions")) { + options.allowDynamicDimensions = true; + } + + return kTfLiteOk; +} + +TfLiteDelegatePtr NnrtDelegateProvider::CreateTfLiteDelegate(const ToolParams& params) const +{ 
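+    // A null delegate with a no-op deleter is returned unchanged when use_nnrt is not set
+    // or the NNRT library is unavailable on this platform.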
+ TfLiteDelegatePtr delegate(nullptr, [](TfLiteDelegate*) {}); + if (!params.Get("use_nnrt")) { + return delegate; + } + + NnrtDelegate::Options options; + TFLITE_TOOLS_CHECK(MapParams(params, options) == kTfLiteOk) << "Map params to NNRT Delegate options failed."; + TFLITE_TOOLS_CHECK(GetExecutionPerformance(params, options) == kTfLiteOk) << + "Create TfLite NNRT Delegate failed."; + TFLITE_TOOLS_CHECK(GetExecutionPriority(params, options) == kTfLiteOk) << "Create TfLite NNRT Delegate failed."; + + const auto* nnrtImpl = NnrtImplementation(); + if (!nnrtImpl->nnrtExists) { + TFLITE_LOG(WARN) << "NNRT acceleration is unsupported on this platform."; + return delegate; + } + + return TfLiteDelegatePtr(new (std::nothrow) NnrtDelegate(nnrtImpl, options), + [](TfLiteDelegate* delegate) { delete reinterpret_cast(delegate); }); +} + +std::pair NnrtDelegateProvider::CreateRankedTfLiteDelegate(const ToolParams& params) const +{ + auto ptr = CreateTfLiteDelegate(params); + LogParams(params, false); + return std::make_pair(std::move(ptr), params.GetPosition("use_nnrt")); +} +} // namespace tools +} // namespace tflite \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_op_builder.cpp b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_op_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b4bdb9362648a79ac7f53faad0a5dead6a62b141 --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_op_builder.cpp @@ -0,0 +1,503 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#define __STDC_WANT_LIB_EXT1__ 1 + +#include "nnrt_op_builder.h" + +#include + +#include "neural_network_runtime.h" +#include "tensorflow/lite/util.h" +#include "tensorflow/lite/context_util.h" + +#include "nnrt_utils.h" + +namespace tflite { +namespace delegate { +namespace nnrt { +constexpr int32_t SCALAR_TENSOR_RANK = 1; +constexpr int32_t ADDZEROS_BIAS_INDEX = -1; +constexpr int32_t UNSPECIFIED_DIMENSION_VALUE = -1; +const std::vector DEPTHWISE_TRANSPOSE_AXISS = { 3, 1, 2, 0 }; + +NnrtOpBuilder::NnrtOpBuilder(const NnrtApi* nnrt, NnrtOpBuilderArgs& opBuilderArgs) + : m_nnrt(nnrt), + m_context(opBuilderArgs.context), + m_pTensorMapping(opBuilderArgs.pTensorMapping), + m_nnModel(opBuilderArgs.nnModel), + m_allowDynamicDimensions(opBuilderArgs.delegateOptions.allowDynamicDimensions) +{ + // Map Op func pointer + MapBuiltinCodeToFunc(); + + // Get model inputs + for (int32_t i : TfLiteIntArrayView(opBuilderArgs.inputTensors)) { + // Constant tensors are not NNRT inputs. 
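+        // Optional (-1) entries and kTfLiteMmapRo constants are filtered out; only true graph
+        // inputs are tracked in m_inputs.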
+ if (i != kTfLiteOptionalTensor && opBuilderArgs.context->tensors[i].allocation_type != kTfLiteMmapRo) { + m_inputs.emplace_back(i); + } + } +} + +TfLiteStatus NnrtOpBuilder::AddZerosBias(const NnrtOpMappingArgs& mappingArgs, int32_t inputId, int32_t filterId, + int32_t channelNum) +{ + int32_t biasIndex = ADDZEROS_BIAS_INDEX; + mappingArgs.context->AddTensors(mappingArgs.context, 1, &biasIndex); + TfLiteTensor* biasTensor = &mappingArgs.context->tensors[biasIndex]; + const auto inputType = mappingArgs.context->tensors[inputId].type; + + if (inputType == kTfLiteFloat32) { + biasTensor->type = kTfLiteFloat32; + } else { + biasTensor->type = kTfLiteInt32; + } + + // Create an array with a required bias shape and resize the bias tensor. + TfLiteIntArray* biasShape = TfLiteIntArrayCreate(1); // 1-dimension + biasShape->data[0] = channelNum; + biasTensor->allocation_type = kTfLiteDynamic; + mappingArgs.context->ResizeTensor(mappingArgs.context, biasTensor, biasShape); + + // Set tensor's values to zeroes and add it using AddVector*, so that the values are copied to NNRT. +#ifdef __STDC_LIB_EXT1__ + if (inputType == kTfLiteFloat32) { + memset_s(biasTensor->data.f, biasTensor->bytes, 0, channelNum * sizeof(float)); + TF_LITE_ENSURE_STATUS(mappingArgs.builder->AddVectorFloat32Tensor(biasTensor->data.f, channelNum, + OH_NN_TENSOR)); + } else { + memset_s(biasTensor->data.i32, biasTensor->bytes, 0, channelNum * sizeof(int32_t)); + const TfLiteTensor& inputTensor = mappingArgs.context->tensors[inputId]; + const TfLiteTensor& filterTensor = mappingArgs.context->tensors[filterId]; + + // NNRT requires bias scale to be a product of an input scale and a filter scale. + biasTensor->params.scale = inputTensor.params.scale * filterTensor.params.scale; + TF_LITE_ENSURE_STATUS(mappingArgs.builder->AddVectorInt32Tensor(biasTensor->data.i32, channelNum, + OH_NN_TENSOR)); + } +#endif + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddBasicComputeParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + if (builtinCode == kTfLiteBuiltinAdd) { + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + TF_LITE_ENSURE_STATUS(AddActivateParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_ADD_ACTIVATIONTYPE)); + } else if (builtinCode == kTfLiteBuiltinMul) { + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + TF_LITE_ENSURE_STATUS(AddActivateParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_MUL_ACTIVATION_TYPE)); + } else if (builtinCode == kTfLiteBuiltinSub) { + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + TF_LITE_ENSURE_STATUS(AddActivateParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_SUB_ACTIVATIONTYPE)); + } else { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-OPBUILDER] unsupportted basic compute type %d.", builtinCode); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddAvgPoolingParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + std::vector kernel = { static_cast(builtin->filter_height), + static_cast(builtin->filter_width) }; + std::vector stride = { static_cast(builtin->stride_height), + static_cast(builtin->stride_width) }; + + mappingArgs.builder->AddVectorInt64Tensor(kernel.data(), kernel.size(), OH_NN_AVG_POOL_KERNEL_SIZE); + mappingArgs.builder->AddVectorInt64Tensor(stride.data(), stride.size(), OH_NN_AVG_POOL_STRIDE); + TF_LITE_ENSURE_STATUS(AddPadParamsInOperator(mappingArgs, builtin, 
builtinCode, + OH_NN_AVG_POOL_PAD_MODE)); + TF_LITE_ENSURE_STATUS(AddActivateParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_AVG_POOL_ACTIVATION_TYPE)); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddMaxPoolingParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + std::vector kernel = { static_cast(builtin->filter_height), + static_cast(builtin->filter_width) }; + std::vector stride = { static_cast(builtin->stride_height), + static_cast(builtin->stride_width) }; + + mappingArgs.builder->AddVectorInt64Tensor(kernel.data(), kernel.size(), OH_NN_MAX_POOL_KERNEL_SIZE); + mappingArgs.builder->AddVectorInt64Tensor(stride.data(), stride.size(), OH_NN_MAX_POOL_STRIDE); + TF_LITE_ENSURE_STATUS(AddPadParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_MAX_POOL_PAD_MODE)); + TF_LITE_ENSURE_STATUS(AddActivateParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_MAX_POOL_ACTIVATION_TYPE)); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddFullConnectedParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + // IF bias is not presented, bias input index will be -1. + const bool isBiasPresent = + (mappingArgs.node->inputs->size == 3) && (mappingArgs.node->inputs->data[2] != kTfLiteOptionalTensor); + + if (!isBiasPresent) { + const int32_t inputTensorId = mappingArgs.node->inputs->data[0]; // kInputTensor + const int32_t filterTensorId = mappingArgs.node->inputs->data[1]; // kWeightsTensor + const int32_t numUnits = mappingArgs.context->tensors[filterTensorId].dims->data[0]; // bias channel num + TF_LITE_ENSURE_STATUS(AddZerosBias(mappingArgs, inputTensorId, filterTensorId, numUnits)); + } + + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + TF_LITE_ENSURE_STATUS(AddActivateParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_FULL_CONNECTION_ACTIVATIONTYPE)); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddConcatenationParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + const int64_t axis = static_cast(builtin->axis); + mappingArgs.builder->AddScalarInt64Tensor(axis, OH_NN_CONCAT_AXIS); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddSoftmaxParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + const int64_t axis = static_cast(builtin->beta); + mappingArgs.builder->AddScalarInt64Tensor(axis, OH_NN_SOFTMAX_AXIS); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddQuantizeParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + OH_NN_DataType nnType {OH_NN_FLOAT32}; + + int32_t inputIndex = mappingArgs.node->inputs->data[0]; + m_pTensorMapping->ConvertType(m_context, inputIndex, 0, nnType); + mappingArgs.builder->AddScalarInt64Tensor(static_cast(nnType), OH_NN_QUANT_DTYPE_CAST_SRC_T); + + int32_t outputIndex = mappingArgs.node->outputs->data[0]; + m_pTensorMapping->ConvertType(m_context, outputIndex, 0, nnType); + mappingArgs.builder->AddScalarInt64Tensor(static_cast(nnType), OH_NN_QUANT_DTYPE_CAST_DST_T); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddPackParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + const int64_t axis = static_cast(builtin->axis); + mappingArgs.builder->AddScalarInt64Tensor(axis, OH_NN_STACK_AXIS); + + return 
kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddPadParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + float padValue = 0.0; + mappingArgs.builder->AddScalarFloat32Tensor(padValue, OH_NN_PAD_CONSTANT_VALUE); + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddReduceMeanParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + const int32_t keepDims = (builtin->keep_dims); + mappingArgs.builder->AddScalarBoolTensor(keepDims, OH_NN_REDUCE_MEAN_KEEP_DIMS); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddStridedSliceParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + mappingArgs.builder->AddScalarInt64Tensor(static_cast(builtin->begin_mask), + OH_NN_STRIDED_SLICE_BEGIN_MASK); + mappingArgs.builder->AddScalarInt64Tensor(static_cast(builtin->end_mask), + OH_NN_STRIDED_SLICE_END_MASK); + mappingArgs.builder->AddScalarInt64Tensor(static_cast(builtin->ellipsis_mask), + OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + mappingArgs.builder->AddScalarInt64Tensor(static_cast(builtin->new_axis_mask), + OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + mappingArgs.builder->AddScalarInt64Tensor(static_cast(builtin->shrink_axis_mask), + OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddReshapeParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + if (mappingArgs.node->inputs->size == 1) { + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + int32_t numDimensions = builtin->num_dimensions; + std::vector outputShape(numDimensions); + for (int32_t i = 0; i < numDimensions; ++i) { + outputShape[i] = builtin->shape[i]; + } + mappingArgs.builder->AddVectorInt32Tensor(outputShape.data(), outputShape.size(), OH_NN_TENSOR); + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddConv2DParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + std::vector stride = { static_cast(builtin->stride_height), + static_cast(builtin->stride_width) }; + std::vector dilation = { static_cast(builtin->dilation_height_factor), + static_cast(builtin->dilation_width_factor) }; + int64_t groupNum = 1; + + mappingArgs.builder->AddVectorInt64Tensor(stride.data(), stride.size(), OH_NN_CONV2D_STRIDES); + mappingArgs.builder->AddVectorInt64Tensor(dilation.data(), dilation.size(), OH_NN_CONV2D_DILATION); + + TF_LITE_ENSURE_STATUS(AddPadParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_CONV2D_PAD_MODE)); + mappingArgs.builder->AddScalarInt64Tensor(groupNum, OH_NN_CONV2D_GROUP); + TF_LITE_ENSURE_STATUS(AddActivateParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_CONV2D_ACTIVATION_TYPE)); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddDepthwiseConv2DParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + std::vector stride = { static_cast(builtin->stride_height), + static_cast(builtin->stride_width) }; + std::vector dilation = { static_cast(builtin->dilation_height_factor), + static_cast(builtin->dilation_width_factor) }; + TF_LITE_ENSURE_STATUS(mappingArgs.builder->AddVectorInt64Tensor(stride.data(), stride.size(), + OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES)); + TF_LITE_ENSURE_STATUS(mappingArgs.builder->AddVectorInt64Tensor(dilation.data(), dilation.size(), + 
OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION)); + TF_LITE_ENSURE_STATUS(AddPadParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD)); + TF_LITE_ENSURE_STATUS(AddActivateParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE)); + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::FinalizeAddOperation(OH_NN_OperationType type, int32_t liteNodeIndex) +{ + // Actually add a NN API Operation + OH_NN_UInt32Array inputIndices; + OH_NN_UInt32Array outputIndices; + OH_NN_UInt32Array paramIndices; + inputIndices.data = m_augmentedInputs.data(); + inputIndices.size = static_cast(m_augmentedInputs.size()); + outputIndices.data = m_augmentedOutputs.data(); + outputIndices.size = static_cast(m_augmentedOutputs.size()); + paramIndices.size = static_cast(m_augmentedParams.size()); + + paramIndices.data = (m_augmentedParams.size() == 0) ? nullptr : m_augmentedParams.data(); + + RETURN_TFLITE_ERROR_IF_NN_ERROR(m_nnrt->OH_NNModel_AddOperation(m_nnModel, + type, ¶mIndices, &inputIndices, &outputIndices), "adding operation"); + + m_augmentedInputs.clear(); + m_augmentedOutputs.clear(); + m_augmentedParams.clear(); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddTensor(int32_t tensorIndex, int32_t builtinCode, std::vector& indices, + int32_t tensorFlags) +{ + int32_t nnTensorIndex = m_pTensorMapping->LiteIndexToNn(tensorIndex); + if (nnTensorIndex != INVALID_INDEX) { + indices.emplace_back(nnTensorIndex); + return kTfLiteOk; + } + + // Parameters needed for new type. + TfLiteTensor* tensor = &(m_context->tensors[tensorIndex]); + if (kTfLiteNoType == tensor->type) { + indices.emplace_back(INVALID_INDEX); + return kTfLiteOk; + } + + TF_LITE_ENSURE_STATUS(AddTensor(tensorIndex, builtinCode, tensorFlags, nnTensorIndex)); + + indices.emplace_back(nnTensorIndex); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddTensor(int32_t tensorIndex, int32_t builtinCode, int32_t tensorFlags, + int32_t& nnTensorIndex) +{ + TfLiteTensor* tensor = &(m_context->tensors[tensorIndex]); + const bool scalarAsTensor = tensorFlags & NN_TENSOR_FLAG_SCALAR_AS_TENSOR; + OH_NN_Tensor nnTensor; + OH_NN_QuantParam nnQuantParam; + std::vector weightDims; + void* tensorData = tensor->data.data; + std::vector depthwiseTensorData; + TF_LITE_ENSURE_STATUS(ConstructNNTensor(tensorIndex, builtinCode, scalarAsTensor, nnQuantParam, nnTensor)); + + // For depth-wise conv operator, we should transpose weight tensor to adapt NN tensor format. 
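+    // Constant 4-D depthwise weights are permuted with the axes in DEPTHWISE_TRANSPOSE_AXISS
+    // into a scratch buffer before the data is handed to OH_NNModel_SetTensorData.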
+ if ((builtinCode == kTfLiteBuiltinDepthwiseConv2d) && (tensor->allocation_type == kTfLiteMmapRo) && + (nnTensor.dimensionCount == DEPTHWISE_WEIGHT_DIMENSION_COUNT)) { + size_t typeBytes = 0; + int64_t tensorSize = 0; + TF_LITE_ENSURE_STATUS(GetSizeOfType(m_context, tensor->type, &typeBytes)); + TF_LITE_ENSURE_STATUS(GetTensorSize(m_context, nnTensor.dimensions, nnTensor.dimensionCount, tensorSize)); + + depthwiseTensorData.assign(tensorSize * typeBytes, 0); + TfLiteStatus retCode = TransposeDepthwiseTensor(tensorIndex, nnTensor, weightDims, depthwiseTensorData); + if (retCode != kTfLiteOk) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-OPBUILDER] Fail to transpose depthwise tensor."); + return kTfLiteError; + } + tensorData = static_cast(depthwiseTensorData.data()); + } + + int32_t nnRet = m_nnrt->OH_NNModel_AddTensor(m_nnModel, &nnTensor); + if (nnRet != OH_NN_SUCCESS) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-OPBUILDER] Fail to add nnTensor to NN model."); + return kTfLiteError; + } + + // Allocate a new tensor index + nnTensorIndex = m_pTensorMapping->AddNewNnTensorIndex(tensorIndex); + if (tensor->allocation_type == kTfLiteMmapRo) { + nnRet = m_nnrt->OH_NNModel_SetTensorData(m_nnModel, nnTensorIndex, + tensorData, tensor->bytes); + if (nnRet != OH_NN_SUCCESS) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-OPBUILDER] Fail to setting new nnTensor value."); + return kTfLiteError; + } + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::TransposeDepthwiseTensor(int32_t tensorIndex, OH_NN_Tensor& nnTensor, + std::vector& weightDims, std::vector& tensorData) +{ + const int32_t* tensorDims = nnTensor.dimensions; + uint32_t tensorRank = nnTensor.dimensionCount; + + // For Depth-wise Convolution, NNRT choose to Transpose dimension with [3, 1, 2, 0] + TF_LITE_ENSURE_STATUS(TransposeDims(m_context, tensorDims, tensorRank, DEPTHWISE_TRANSPOSE_AXISS, weightDims)); + nnTensor.dimensions = weightDims.data(); + + TfLiteTensor* tensor = &(m_context->tensors[tensorIndex]); + if (tensor->type == kTfLiteFloat32) { + TF_LITE_ENSURE_STATUS( + TransposeTensor(m_context, tensorIndex, tensorDims, reinterpret_cast(tensorData.data()))); + } else if (tensor->type == kTfLiteInt32) { + TF_LITE_ENSURE_STATUS( + TransposeTensor(m_context, tensorIndex, tensorDims, reinterpret_cast(tensorData.data()))); + } else if (tensor->type == kTfLiteInt8) { + TF_LITE_ENSURE_STATUS( + TransposeTensor(m_context, tensorIndex, tensorDims, reinterpret_cast(tensorData.data()))); + } else if (tensor->type == kTfLiteUInt8) { + TF_LITE_ENSURE_STATUS( + TransposeTensor(m_context, tensorIndex, tensorDims, reinterpret_cast(tensorData.data()))); + } else { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-OPBUILDER] unsupportted weight tensor type %d.", tensor->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::ConstructNNTensor(int32_t tensorIndex, int32_t builtinCode, int32_t tensorFlags, + OH_NN_QuantParam& nnQuantParam, OH_NN_Tensor& nnTensor) +{ + OH_NN_DataType nnType {OH_NN_UNKNOWN}; + TF_LITE_ENSURE_STATUS(m_pTensorMapping->ConvertType(m_context, tensorIndex, tensorFlags, nnType)); + TF_LITE_ENSURE_STATUS(m_pTensorMapping->ConvertQuantParams(m_context, tensorIndex, nnQuantParam)); + + TfLiteTensor* tensor = &(m_context->tensors[tensorIndex]); + uint32_t tensorRank = static_cast(tensor->dims->size); + m_dimsUnspecified.assign(tensorRank, UNSPECIFIED_DIMENSION_VALUE); + + int32_t* tensorDims = (m_allowDynamicDimensions && (tensor->allocation_type != kTfLiteMmapRo) && + std::find(m_inputs.begin(), 
m_inputs.end(), tensorIndex) != m_inputs.end()) ? + reinterpret_cast(m_dimsUnspecified.data()) : + tensor->dims->data; + + const bool scalarAsTensor = tensorFlags & NN_TENSOR_FLAG_SCALAR_AS_TENSOR; + if (scalarAsTensor && tensorRank == 0) { + tensorRank = SCALAR_TENSOR_RANK; // Use rank 1, shape {1} nnTensor for TFLite scalar tensors. + tensorDims = const_cast(&SCALAR_TENSOR_RANK); + } + + if (tensorRank == 0) { + // if the tensorRank is 0, the dimension ptr must be nullptr. + tensorDims = nullptr; + } + + nnTensor.dataType = nnType; + nnTensor.dimensionCount = tensorRank; + nnTensor.dimensions = tensorDims; + nnTensor.quantParam = nnQuantParam.quantCount ? &nnQuantParam : nullptr; + nnTensor.type = OH_NN_TENSOR; + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddOpFuncParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + if (!m_keyToOpFunc.count(builtinCode)) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-OPBUILDER] unsupportted Op builtinCode : %d.", builtinCode); + return kTfLiteError; + } + + OpFuncPtr pfunc = m_keyToOpFunc[builtinCode]; + return (this->*pfunc)(mappingArgs, builtinCode); +} + +TfLiteStatus NnrtOpBuilder::MapBuiltinCodeToFunc() +{ + m_keyToOpFunc[kTfLiteBuiltinAdd] = &NnrtOpBuilder::AddBasicComputeParams; + m_keyToOpFunc[kTfLiteBuiltinAveragePool2d] = &NnrtOpBuilder::AddAvgPoolingParams; + m_keyToOpFunc[kTfLiteBuiltinConcatenation] = &NnrtOpBuilder::AddConcatenationParams; + m_keyToOpFunc[kTfLiteBuiltinConv2d] = &NnrtOpBuilder::AddConv2DParams; + m_keyToOpFunc[kTfLiteBuiltinDepthwiseConv2d] = &NnrtOpBuilder::AddDepthwiseConv2DParams; + m_keyToOpFunc[kTfLiteBuiltinDequantize] = &NnrtOpBuilder::AddQuantizeParams; + m_keyToOpFunc[kTfLiteBuiltinFullyConnected] = &NnrtOpBuilder::AddFullConnectedParams; + m_keyToOpFunc[kTfLiteBuiltinMaxPool2d] = &NnrtOpBuilder::AddMaxPoolingParams; + m_keyToOpFunc[kTfLiteBuiltinMul] = &NnrtOpBuilder::AddBasicComputeParams; + m_keyToOpFunc[kTfLiteBuiltinSub] = &NnrtOpBuilder::AddBasicComputeParams; + m_keyToOpFunc[kTfLiteBuiltinReshape] = &NnrtOpBuilder::AddReshapeParams; + m_keyToOpFunc[kTfLiteBuiltinSoftmax] = &NnrtOpBuilder::AddSoftmaxParams; + m_keyToOpFunc[kTfLiteBuiltinStridedSlice] = &NnrtOpBuilder::AddStridedSliceParams; + m_keyToOpFunc[kTfLiteBuiltinPack] = &NnrtOpBuilder::AddPackParams; + m_keyToOpFunc[kTfLiteBuiltinPad] = &NnrtOpBuilder::AddPadParams; + m_keyToOpFunc[kTfLiteBuiltinMean] = &NnrtOpBuilder::AddReduceMeanParams; + m_keyToOpFunc[kTfLiteBuiltinQuantize] = &NnrtOpBuilder::AddQuantizeParams; + m_keyToOpFunc[kTfLiteBuiltinHardSwish] = &NnrtOpBuilder::AddDefaultOpParams; + m_keyToOpFunc[kTfLiteBuiltinShape] = &NnrtOpBuilder::AddDefaultOpParams; + m_keyToOpFunc[kTfLiteBuiltinLogistic] = &NnrtOpBuilder::AddDefaultOpParams; + + return kTfLiteOk; +} +} // namespace nnrt +} // namespace delegate +} // namespace tflite \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_op_builder.h b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_op_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..bb4e82386f78c797246b929fdf7bb99b15d34330 --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_op_builder.h @@ -0,0 +1,325 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_LITE_DELEGATES_NNRT_OP_BUILDER_H +#define TENSORFLOW_LITE_DELEGATES_NNRT_OP_BUILDER_H + +#include + +#include "tensorflow/lite/builtin_ops.h" +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/minimal_logging.h" + +#include "../nnrt/nnrt_implementation.h" +#include "tensor_mapping.h" + +namespace tflite { +namespace delegate { +namespace nnrt { +constexpr int32_t PADDING_SAME = 0; +constexpr int32_t PADDING_VALID = 1; + +// NN API Operator Builder +class NnrtOpBuilder; + +// The kernel that represents the node sub set of TF Lite being run on NN API. +struct NnrtOpMappingArgs { + TfLiteContext* context {nullptr}; + NnrtOpBuilder* builder {nullptr}; + TfLiteNode* node {nullptr}; + int32_t nodeIndex {-1}; +}; + +struct NnrtOpBuilderArgs { + TfLiteContext* context {nullptr}; + OH_NNModel* nnModel {nullptr}; + TfLiteIntArray* inputTensors {nullptr}; + TensorMapping* pTensorMapping {nullptr}; + NnrtDelegate::Options delegateOptions; +}; + +// Abstract builder for building an op in the NN API graph. This handles +// the disparity between TFLite and NN API nnTensor types. NN API has singular +// nnTensors for both tensors and parameters, and TFLite separates the two. +class NnrtOpBuilder { +public: + NnrtOpBuilder(const NnrtApi* nnrt, NnrtOpBuilderArgs& opBuilderArgs); + ~NnrtOpBuilder() = default; + + // Add scalar nnTensor, the datatypes involved are bool, Int32, Int8, Int64, Float32 + TfLiteStatus AddScalarBoolTensor(bool value, OH_NN_TensorType nnTensorType) + { + return AddScalarTensor(value, OH_NN_BOOL, nnTensorType); + } + TfLiteStatus AddScalarInt32Tensor(int32_t value, OH_NN_TensorType nnTensorType) + { + return AddScalarTensor(value, OH_NN_INT32, nnTensorType); + } + TfLiteStatus AddScalarInt8Tensor(int32_t value, OH_NN_TensorType nnTensorType) + { + return AddScalarTensor(value, OH_NN_INT8, nnTensorType); + } + TfLiteStatus AddScalarInt64Tensor(int64_t value, OH_NN_TensorType nnTensorType) + { + return AddScalarTensor(value, OH_NN_INT64, nnTensorType); + } + TfLiteStatus AddScalarFloat32Tensor(float value, OH_NN_TensorType nnTensorType) + { + return AddScalarTensor(value, OH_NN_FLOAT32, nnTensorType); + } + + // Add vector nnTensor, the datatypes involved are Int32, Int64, Int16, Int8, Float32 + TfLiteStatus AddVectorInt32Tensor(const int32_t* values, uint32_t numValues, OH_NN_TensorType nnTensorType) + { + return AddVectorTensor(values, numValues, OH_NN_UINT32, nnTensorType); + } + TfLiteStatus AddVectorInt64Tensor(const int64_t* values, uint32_t numValues, OH_NN_TensorType nnTensorType) + { + return AddVectorTensor(values, numValues, OH_NN_INT64, nnTensorType); + } + TfLiteStatus AddVectorFloat32Tensor(const float* values, uint32_t numValues, OH_NN_TensorType nnTensorType) + { + return AddVectorTensor(values, numValues, OH_NN_FLOAT32, nnTensorType); + } + + // Add input tensor + TfLiteStatus AddTensorInput(int32_t tensorIndex, int32_t builtinCode, int32_t tensorFlags = 0) + { + return AddTensor(tensorIndex, builtinCode, m_augmentedInputs, tensorFlags); + } + // Add 
output tensor + TfLiteStatus AddTensorOutput(int32_t tensorIndex, int32_t builtinCode, int32_t tensorFlags = 0) + { + return AddTensor(tensorIndex, builtinCode, m_augmentedOutputs, tensorFlags); + } + + // Finish emitting the op (of type `type`) into the NN API. + TfLiteStatus FinalizeAddOperation(OH_NN_OperationType type, int32_t liteNodeIndex); + + void ClearInputOuputLists() + { + m_augmentedInputs.clear(); + m_augmentedOutputs.clear(); + m_augmentedParams.clear(); + } + + TfLiteStatus AddOpFuncParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus MapBuiltinCodeToFunc(); + +private: + template + TfLiteStatus AddScalarTensor(T value, OH_NN_DataType nnType, OH_NN_TensorType nnTensorType) + { + OH_NN_Tensor tensor { + .dataType = nnType, + .dimensionCount = 0, + .dimensions = nullptr, + .quantParam = nullptr, + .type = nnTensorType, + }; + + RETURN_TFLITE_ERROR_IF_NN_ERROR( + m_nnrt->OH_NNModel_AddTensor(m_nnModel, &tensor), "adding nnTensor"); + + const int32_t nnIndex = m_pTensorMapping->AddNewNonTensorTensor(); + RETURN_TFLITE_ERROR_IF_NN_ERROR( + m_nnrt->OH_NNModel_SetTensorData(m_nnModel, nnIndex, &value, sizeof(value)), + "setting new nnTensor value"); + m_augmentedParams.emplace_back(nnIndex); + + return kTfLiteOk; + } + + template + TfLiteStatus AddVectorTensor(const T* values, int32_t numValues, OH_NN_DataType nnType, + OH_NN_TensorType nnTensorType) + { + if (values == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-OPBUILDER] The variable of values is nullptr when adding vector to operator."); + return kTfLiteError; + } + uint32_t numBits = 8; + double doubleScale = 0.f; + int32_t zeroPoint = 0; + OH_NN_QuantParam quantParam = { + .quantCount = 1, + .numBits = &numBits, + .scale = &doubleScale, + .zeroPoint = &zeroPoint + }; + + OH_NN_Tensor tensor { + .dataType = nnType, + .dimensionCount = 1, // For 1-dim vector, dimensionCount is one. 
+ .dimensions = &numValues, + .quantParam = &quantParam, + .type = nnTensorType, + }; + + RETURN_TFLITE_ERROR_IF_NN_ERROR( + m_nnrt->OH_NNModel_AddTensor(m_nnModel, &tensor), "adding nnTensor"); + const int32_t nnIndex = m_pTensorMapping->AddNewNonTensorTensor(); + RETURN_TFLITE_ERROR_IF_NN_ERROR( + m_nnrt->OH_NNModel_SetTensorData(m_nnModel, nnIndex, values, sizeof(*(values)) * numValues), + "settings new nnTensor value"); + m_augmentedParams.emplace_back(nnIndex); + + return kTfLiteOk; + } + + template + TfLiteStatus AddActivateParamsInOperator(const NnrtOpMappingArgs& mappingArgs, T* builtinParams, + int32_t builtinCode, OH_NN_TensorType nnTensorType) + { + if (builtinParams == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-OPBUILDER] The builtin params is nullptr when adding activate params to operator."); + return kTfLiteError; + } + + if ((builtinParams->activation >= 0) && + (builtinParams->activation < ACTIVATE_FUSE_TYPE_LIST.size()) && + (ACTIVATE_FUSE_TYPE_LIST[builtinParams->activation] != OH_NN_FUSE_UNSUPPORTED)) { + mappingArgs.builder->AddScalarInt8Tensor(ACTIVATE_FUSE_TYPE_LIST[builtinParams->activation], nnTensorType); + } else { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-OPBUILDER] unsupportted fused activation type %d for OpType %d.", + builtinParams->activation, builtinCode); + return kTfLiteError; + } + + return kTfLiteOk; + } + + template + TfLiteStatus AddPadParamsInOperator(const NnrtOpMappingArgs& mappingArgs, T* builtinParams, int32_t builtinCode, + OH_NN_TensorType nnTensorType) + { + if (builtinParams == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-OPBUILDER] The builtin params is nullptr when adding pad params to operator."); + return kTfLiteError; + } + + int32_t padding = 0; + if (builtinParams->padding == kTfLitePaddingUnknown) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-OPBUILDER] unknown padding mode for OpType %d.", builtinCode); + return kTfLiteError; + } else { + padding = (builtinParams->padding == kTfLitePaddingSame) ? PADDING_SAME : PADDING_VALID; + } + mappingArgs.builder->AddScalarInt8Tensor(padding, nnTensorType); + + return kTfLiteOk; + } + + // NNRT requires a bias tensor, so we allocate a new tensor to fill it with zeroes. + // It is deleted with other tensors in the context during subgraph destructor call. 
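+    // The zero bias mirrors the input type (float32 or int32); for quantized inputs its scale is
+    // set to input_scale * filter_scale, as NNRT requires.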
+ TfLiteStatus AddZerosBias(const NnrtOpMappingArgs& mappingArgs, int32_t inputId, int32_t filterId, + int32_t channelNum); + + TfLiteStatus AddBasicComputeParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddAvgPoolingParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddMaxPoolingParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddFullConnectedParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddConv2DParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddConcatenationParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddSoftmaxParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddQuantizeParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddPackParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddPadParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddReduceMeanParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddReshapeParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddStridedSliceParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddDepthwiseConv2DParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddDefaultOpParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) + { + return kTfLiteOk; + } + + // Adds a new NN API tensor that shadows the TF Lite tensor `tensorIndex`. + // This restores the NN API tensor index corresponding to the created tensor. + // If another caller previously created a NN API tensor for `tensorIndex` + // then the existing one is restored. + TfLiteStatus AddTensor(int32_t tensorIndex, int32_t builtinCode, std::vector& indices, + int32_t tensorFlags = 0); + + // Adds a new NN API nnTensor to NNModel. + // If the builtinCode is kTfLiteBuiltinDepthwiseConv2d, the weight tensor will be transposed to CHWN format. + TfLiteStatus AddTensor(int32_t tensorIndex, int32_t builtinCode, int32_t tensorFlags, int32_t& nnTensorIndex); + + // Transpose dimension for Depth-wise Convolution Operator. + TfLiteStatus TransposeDepthwiseTensor(int32_t tensorIndex, OH_NN_Tensor& nnTensor, std::vector& destDims, + std::vector& tensorData); + + // Get NN nnTensor from tensor + TfLiteStatus ConstructNNTensor(int32_t tensorIndex, int32_t builtinCode, int32_t tensorFlags, + OH_NN_QuantParam& nnQuantParam, OH_NN_Tensor& nnTensor); + +private: + // Access to NNRT. + const NnrtApi* const m_nnrt; + + // TfLiteContext for error handling. + TfLiteContext* const m_context; + + // Indices of all inputs of tflite subgraph. + std::vector m_inputs; + + // Tracks relationship between indices. + TensorMapping* const m_pTensorMapping; + + // The NNRT model. + OH_NNModel* const m_nnModel; + + // Inputs and outputs for the current op. These are augmented in the sense + // that NN API uses nnTensors for all arguments, not just tensors, unlike + // TensorFlow Lite. + std::vector m_augmentedInputs; + std::vector m_augmentedParams; + std::vector m_augmentedOutputs; + + // Whether to allow dynamic batch size without re-compilation. + bool m_allowDynamicDimensions; + + // the dynamic dimension information. 
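+    // Filled with -1 (UNSPECIFIED_DIMENSION_VALUE) placeholders for input tensors when dynamic
+    // dimensions are allowed; see ConstructNNTensor.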
+ std::vector m_dimsUnspecified; + + // key builtInCode to OpFunc Map + using OpFuncPtr = TfLiteStatus(NnrtOpBuilder::*)(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + std::map m_keyToOpFunc; +}; +} // namespace nnrt +} // namespace delegate +} // namespace tflite + +#endif // TENSORFLOW_LITE_DELEGATES_NNRT_OP_BUILDER_H \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_utils.cpp b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_utils.cpp new file mode 100644 index 0000000000000000000000000000000000000000..870976b3bbe555d55b9782dd6b2d16146ab2a5a4 --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_utils.cpp @@ -0,0 +1,347 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnrt_utils.h" + +#include +#include "tensorflow/lite/util.h" +#include "tensorflow/lite/builtin_ops.h" +#include "tensorflow/lite/context_util.h" +#include "tensorflow/lite/minimal_logging.h" +#include "tensorflow/lite/kernels/kernel_util.h" + +#include "neural_network_runtime_type.h" + +namespace tflite { +std::string NnrtErrorDescription(int32_t errorCode) +{ + switch (errorCode) { + case OH_NN_SUCCESS: + return "OH_NN_SUCCESS"; + case OH_NN_FAILED: + return "OH_NN_FAILED"; + case OH_NN_INVALID_PARAMETER: + return "OH_NN_INVALID_PARAMETER"; + case OH_NN_MEMORY_ERROR: + return "OH_NN_MEMORY_ERROR"; + case OH_NN_OPERATION_FORBIDDEN: + return "OH_NN_OPERATION_FORBIDDEN"; + case OH_NN_NULL_PTR: + return "OH_NN_NULL_PTR"; + case OH_NN_INVALID_FILE: + return "OH_NN_INVALID_FILE"; + case OH_NN_UNAVALIDABLE_DEVICE: + return "OH_NN_UNAVALIDABLE_DEVICE"; + case OH_NN_INVALID_PATH: + return "OH_NN_INVALID_PATH"; + default: + return "Unknown NNRT error code: " + std::to_string(errorCode); + } +} + +bool IsFloat(TfLiteType type) +{ + return type == kTfLiteFloat32; +} + +bool IsQuantized(TfLiteType type) +{ + return ((type == kTfLiteUInt8) || (type == kTfLiteInt8)); +} + +bool IsScalarInputSupported(int32_t builtinCode) +{ + switch (builtinCode) { + case kTfLiteBuiltinAdd: + case kTfLiteBuiltinMul: + case kTfLiteBuiltinSub: + case kTfLiteBuiltinDiv: + case kTfLiteBuiltinEqual: + case kTfLiteBuiltinNotEqual: + case kTfLiteBuiltinGreater: + case kTfLiteBuiltinGreaterEqual: + case kTfLiteBuiltinLess: + case kTfLiteBuiltinLessEqual: + case kTfLiteBuiltinPow: + case kTfLiteBuiltinMaximum: + case kTfLiteBuiltinMinimum: + case kTfLiteBuiltinPrelu: + case kTfLiteBuiltinLeakyRelu: + return true; + default: + return false; + } +} + +bool IsUseTargetDevice(NnrtDelegate::Options delegateOptions, bool excludeNnrtReference) +{ + const std::string& deviceName = delegateOptions.acceleratorName; + bool hasSelectedAccelerator = !deviceName.empty(); + if (!excludeNnrtReference && hasSelectedAccelerator) { + if (!deviceName.compare(NNRT_REFERENCE_DEVICE)) { + hasSelectedAccelerator = false; + } + } + + return hasSelectedAccelerator; +} + +TfLiteStatus 
GetTargetDevice(TfLiteContext* context, TfLiteDelegate* delegate, const NnrtApi* nnrt, size_t& dev) +{ + TF_LITE_ENSURE_EQ(context, nnrt != nullptr, true); + TF_LITE_ENSURE_EQ(context, delegate != nullptr, true); + + NnrtDelegate::Options delegateOptions; + TF_LITE_ENSURE_STATUS(NnrtDelegate::GetOptions(delegate, delegateOptions)); + const std::string& deviceName = delegateOptions.acceleratorName; + + uint32_t numDevices {0}; + const size_t* alldevicesID {nullptr}; + RETURN_TFLITE_ERROR_IF_NN_ERROR(nnrt->OH_NNDevice_GetAllDevicesID(&alldevicesID, &numDevices), + "Get available device number and deviceID."); + if (numDevices == 0) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-UTILS] Have no available device."); + return kTfLiteError; + } + + std::vector deviceTypes; + for (uint32_t i = 0; i < numDevices; ++i) { + OH_NN_DeviceType tempDeviceType {OH_NN_ACCELERATOR}; + RETURN_TFLITE_ERROR_IF_NN_ERROR(nnrt->OH_NNDevice_GetType(alldevicesID[i], &tempDeviceType), + "Get available devicesType."); + deviceTypes.emplace_back(tempDeviceType); + } + + OH_NN_DeviceType deviceType {OH_NN_CPU}; + std::vector::iterator pos = std::find(deviceTypes.begin(), deviceTypes.end(), deviceType); + if (pos != deviceTypes.end()) { + int index = distance(deviceTypes.begin(), pos); + dev = alldevicesID[index]; + } else { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-UTILS] Cannot find the %s device, please choose another process unit.", + deviceName.c_str()); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus TransposeDims(TfLiteContext* context, const int32_t* dims, uint32_t dimCount, + std::vector destAxis, std::vector& weightDims) +{ + TF_LITE_ENSURE_EQ(context, dims != nullptr, true); + + if (dimCount != destAxis.size()) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-UTILS] Invalid dimension count %d.", dimCount); + return kTfLiteError; + } + + for (auto axis : destAxis) { + if (axis < 0) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-UTILS] Invalid axis %d.", axis); + return kTfLiteError; + } + weightDims.emplace_back(*(dims + axis)); + } + + return kTfLiteOk; +} + +TfLiteStatus GetTensorSize(TfLiteContext* context, const int32_t* dims, int32_t dimCount, int64_t& tensorSize) +{ + TF_LITE_ENSURE_EQ(context, dims != nullptr, true); + + if (dimCount != DEPTHWISE_WEIGHT_DIMENSION_COUNT) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-UTILS] Dimension count is not equal to destination axis number, should be 4."); + return kTfLiteError; + } + + tensorSize = 1; + for (int32_t i = 0; i < dimCount; ++i) { + if (*(dims + i) <= 0) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-UTILS] Get invalid dimenision."); + return kTfLiteError; + } + tensorSize *= *(dims + i); + } + + return kTfLiteOk; +} + +namespace delegate { +namespace nnrt { +const std::vector ACTIVATE_FUSE_TYPE_LIST = { + OH_NN_FUSED_NONE, + OH_NN_FUSED_RELU, + OH_NN_FUSE_UNSUPPORTED, + OH_NN_FUSED_RELU6, + OH_NN_FUSE_UNSUPPORTED, + OH_NN_FUSE_UNSUPPORTED, + OH_NN_FUSE_UNSUPPORTED +}; + +const unorderedTypeMap TFLITE_TYPE_TO_NNRT_TYPE = { + {kTfLiteBuiltinAdd, OH_NN_OPS_ADD}, + {kTfLiteBuiltinAveragePool2d, OH_NN_OPS_AVG_POOL}, + {kTfLiteBuiltinConcatenation, OH_NN_OPS_CONCAT}, + {kTfLiteBuiltinConv2d, OH_NN_OPS_CONV2D}, + {kTfLiteBuiltinDepthwiseConv2d, OH_NN_OPS_DEPTHWISE_CONV2D_NATIVE}, + {kTfLiteBuiltinDepthToSpace, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinDequantize, OH_NN_OPS_QUANT_DTYPE_CAST}, + {kTfLiteBuiltinEmbeddingLookup, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinFloor, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinFullyConnected, 
OH_NN_OPS_FULL_CONNECTION}, + {kTfLiteBuiltinHashtableLookup, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinL2Normalization, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinL2Pool2d, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLocalResponseNormalization, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLogistic, OH_NN_OPS_SIGMOID}, + {kTfLiteBuiltinLshProjection, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLstm, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinMaxPool2d, OH_NN_OPS_MAX_POOL}, + {kTfLiteBuiltinMul, OH_NN_OPS_MUL}, + {kTfLiteBuiltinRelu, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReluN1To1, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinRelu6, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReshape, OH_NN_OPS_RESHAPE}, + {kTfLiteBuiltinResizeBilinear, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinRnn, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSoftmax, OH_NN_OPS_SOFTMAX}, + {kTfLiteBuiltinSpaceToDepth, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSvdf, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinTanh, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinConcatEmbeddings, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSkipGram, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinCall, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinCustom, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinEmbeddingLookupSparse, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinPad, OH_NN_OPS_PAD}, + {kTfLiteBuiltinUnidirectionalSequenceRnn, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinGather, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinBatchToSpaceNd, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSpaceToBatchNd, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinTranspose, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinMean, OH_NN_OPS_REDUCE_MEAN}, + {kTfLiteBuiltinSub, OH_NN_OPS_SUB}, + {kTfLiteBuiltinDiv, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSqueeze, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinUnidirectionalSequenceLstm, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinStridedSlice, OH_NN_OPS_STRIDED_SLICE}, + {kTfLiteBuiltinBidirectionalSequenceRnn, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinExp, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinTopkV2, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSplit, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLogSoftmax, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinDelegate, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinBidirectionalSequenceLstm, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinCast, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinPrelu, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinMaximum, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinArgMax, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinMinimum, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLess, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinNeg, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinPadv2, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinGreater, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinGreaterEqual, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLessEqual, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSelect, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSlice, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSin, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinTransposeConv, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSparseToDense, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinTile, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinExpandDims, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinEqual, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinNotEqual, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLog, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSum, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSqrt, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinRsqrt, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinShape, OH_NN_OPS_SHAPE}, + {kTfLiteBuiltinPow, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinArgMin, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinFakeQuant, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReduceProd, 
OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReduceMax, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinPack, OH_NN_OPS_STACK}, + {kTfLiteBuiltinLogicalOr, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinOneHot, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLogicalAnd, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLogicalNot, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinUnpack, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReduceMin, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinFloorDiv, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReduceAny, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSquare, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinZerosLike, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinFill, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinFloorMod, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinRange, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinResizeNearestNeighbor, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLeakyRelu, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSquaredDifference, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinMirrorPad, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinAbs, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSplitV, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinUnique, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinCeil, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReverseV2, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinAddN, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinGatherNd, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinCos, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinWhere, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinRank, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinElu, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReverseSequence, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinMatrixDiag, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinQuantize, OH_NN_OPS_QUANT_DTYPE_CAST}, + {kTfLiteBuiltinMatrixSetDiag, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinRound, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinHardSwish, OH_NN_OPS_HSWISH}, + {kTfLiteBuiltinIf, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinWhile, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinNonMaxSuppressionV4, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinNonMaxSuppressionV5, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinScatterNd, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSelectV2, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinDensify, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSegmentSum, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinBatchMatmul, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinPlaceholderForGreaterOpCodes, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinCumsum, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinCallOnce, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinBroadcastTo, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinRfft2d, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinConv3d, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinImag, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReal, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinComplexAbs, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinHashtable, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinHashtableFind, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinHashtableImport, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinHashtableSize, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReduceAll, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinConv3dTranspose, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinVarHandle, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReadVariable, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinAssignVariable, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinBroadcastTo, OH_NN_UNSUPPORT_OPS}, +}; +} // nnrt +} // namespace +} // namespace tflite \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_utils.h b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_utils.h new file mode 100644 index 
0000000000000000000000000000000000000000..a4908aae07dc0c2bd4d07b51ab1cd19d8bb50270 --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_utils.h @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_LITE_DELEGATES_NNRT_UTILS_H +#define TENSORFLOW_LITE_DELEGATES_NNRT_UTILS_H + +#include +#include +#include + +#include "nnrt_delegate.h" + +namespace tflite { +constexpr int32_t DEPTHWISE_WEIGHT_BATCH_DIMENSION = 0; +constexpr int32_t DEPTHWISE_WEIGHT_HEIGHT_DIMENSION = 1; +constexpr int32_t DEPTHWISE_WEIGHT_WIDTH_DIMENSION = 2; +constexpr int32_t DEPTHWISE_WEIGHT_CHANNEL_DIMENSION = 3; +constexpr int32_t DEPTHWISE_WEIGHT_DIMENSION_COUNT = 4; +const std::string NNRT_REFERENCE_DEVICE = "nnrt-reference"; + +// Bit mask for tensor flags. +enum BIT_MASK { + NN_TENSOR_FLAG_SCALAR_AS_TENSOR = 1U << 0, + NN_TENSOR_FLAG_INT8_CONVERSION = 1U << 1, + NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED = 1U << 2, + NN_TENSOR_FLAG_FORCE_PER_CHANNEL = 1U << 3, + NN_TENSOR_FLAG_HALF_TO_FLOAT_CONVERSION = 1U << 4, +}; + +// Returns the enum name corresponding to the given error code if the given +// value corresponds to an of the error codes in the enumeration above or +// an message with the unknown code. +// LINT.IfChange(NnrtErrorDescription) +extern std::string NnrtErrorDescription(int32_t errorCode); + +#define RETURN_TFLITE_ERROR_IF_NN_ERROR(code, callDesc) \ + do { \ + if ((code) != OH_NN_SUCCESS) { \ + const auto errorDesc = NnrtErrorDescription((code)); \ + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "NN API returned error %s at line %d while %s.\n", \ + errorDesc.c_str(), __LINE__, (callDesc)); \ + return kTfLiteError; \ + } \ + } while (0) + +#define RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(code, callDesc, pTensor) \ + do { \ + if ((code) != OH_NN_SUCCESS) { \ + const auto errorDesc = NnrtErrorDescription((code)); \ + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, \ + "NN API returned error %s at line %d while %s for tensor '%s'.\n", errorDesc.c_str(), \ + __LINE__, (callDesc), (pTensor)->name ? (pTensor)->name : "no-name"); \ + return kTfLiteError; \ + } \ + } while (0) + +// Return true if type is kTfLiteFloat32. +extern bool IsFloat(TfLiteType type); + +// Return true if type is kTfLiteUInt8 or kTfLiteInt8. +extern bool IsQuantized(TfLiteType type); + +// Return true if the operator supports scalar data as input. +extern bool IsScalarInputSupported(int32_t builtinCode); + +// Returns true if this delegate is configured to use a specific set of devices. +// If the acceleratorName in the delegate options is equal to "nnrt-reference" +// this method will return true only if the excludeNnrtReference is true. +extern bool IsUseTargetDevice( + NnrtDelegate::Options delegateOptions, bool excludeNnrtReference = false); + +// Fills the given result vector with the list of devices the given delegate +// is referring to. 
+// There are three possible results, +// - An empty array (not the full list of available accelerators, +// for efficiency reasons) if no accelerator is chosen and the +// disallowNnrtCpu delegate option is false. +// - A single element array with the target processor, if an accelerator name +// is specified in the delegate options. +// - The target available device on device. +extern TfLiteStatus GetTargetDevice(TfLiteContext* context, TfLiteDelegate* delegate, + const NnrtApi* nnrt, size_t& dev); + +// Transpose demension following fixed axis. +// If exist -1 in destAxis, return kTfLiteError. +extern TfLiteStatus TransposeDims(TfLiteContext* context, const int32_t* dims, uint32_t dimCount, + std::vector destAxis, std::vector& weightDims); + +// Get Tensor size by byte. +// Calculate Tesnorsize by mul all dimension in dims. +// Return kTfLiteError if element dimension is less 0. +extern TfLiteStatus GetTensorSize(TfLiteContext* context, const int32_t* dims, int32_t dimCount, int64_t& tensorSize); + +// Transpose dimension for Tensor. +// Only change NHWC format tensor to CHWN format tensor, and +// the capacity of result vec must equal to input tensor size. +template +TfLiteStatus TransposeTensor(TfLiteContext* context, int32_t tensorIndex, const int32_t* dims, + T* transposeTensor) +{ + TF_LITE_ENSURE_EQ(context, dims != nullptr, true); + + // NHWC -> CHWN + TfLiteTensor* tensor = &(context->tensors[tensorIndex]); + const T* tensorData = reinterpret_cast(tensor->data.data); + const int32_t batch = dims[DEPTHWISE_WEIGHT_BATCH_DIMENSION]; + const int32_t height = dims[DEPTHWISE_WEIGHT_HEIGHT_DIMENSION]; + const int32_t width = dims[DEPTHWISE_WEIGHT_WIDTH_DIMENSION]; + const int32_t channel = dims[DEPTHWISE_WEIGHT_CHANNEL_DIMENSION]; + + for (int32_t c = 0; c < channel; ++c) { + for (int32_t j = 0; j < height * width; ++j) { + for (int32_t n = 0; n < batch; ++n) { + int32_t newPos = c * (height * width) * batch + j * batch + n; + int32_t orgPos = n * (height * width) * channel + j * channel + c; + *(transposeTensor + newPos) = *(tensorData + orgPos); + } + } + } + + return kTfLiteOk; +}; + +namespace delegate { +namespace nnrt { +using unorderedTypeMap = std::unordered_map; + +extern const std::vector ACTIVATE_FUSE_TYPE_LIST; + +extern const unorderedTypeMap TFLITE_TYPE_TO_NNRT_TYPE; + +const int32_t INVALID_INDEX = -1; + +const int32_t OH_NN_UNSUPPORT_OPS = -1; + +const int32_t OH_NN_FUSE_UNSUPPORTED = -1; +} // namespace nnrt +} // namespace delegate +} // namespace tflite + +#endif // TENSORFLOW_LITE_DELEGATES_NNRT_UTILS_H diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/tensor_mapping.h b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/tensor_mapping.h new file mode 100644 index 0000000000000000000000000000000000000000..0f3d13971aac201151b8e0ea463aaecaf8cc6437 --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/tensor_mapping.h @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_LITE_DELEGATES_NNRT_TENSOR_MAPPING_H +#define TENSORFLOW_LITE_DELEGATES_NNRT_TENSOR_MAPPING_H + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/minimal_logging.h" + +#include "nnrt_utils.h" + +namespace tflite { +namespace delegate { +namespace nnrt { +constexpr uint32_t QUANT_NUMBITS = 8; + +class TensorMapping { +public: + // Given a TFLite index return the NN index. If it doesn't exist + // return -1. + int32_t LiteIndexToNn(int32_t index) const + { + const int64_t maxSize = m_liteTensorToNnTensor.size(); + if (index >= 0 && index < maxSize) { + return m_liteTensorToNnTensor[index]; + } else { + return INVALID_INDEX; + } + } + + // NN API uses non tensor tensors instead of structs. This creates one + // and returns the index. It uses a std::vector and resizes it as needed + // keeping -1 to unmapped values. Intermediate tensors likely will not + // be mapped. + const int32_t AddNewNonTensorTensor() + { + return m_nextNnTensorIndex++; + } + + // Add a new mapping from `tfliteIndex` and return the NN API tensor index. + int32_t AddNewNnTensorIndex(int32_t tfliteIndex) + { + const int64_t currentSize = m_liteTensorToNnTensor.size(); + if (tfliteIndex >= currentSize) { + m_liteTensorToNnTensor.resize(tfliteIndex + 1, INVALID_INDEX); + } + const int32_t newTensorIndex = m_nextNnTensorIndex++; + m_liteTensorToNnTensor[tfliteIndex] = newTensorIndex; + return newTensorIndex; + } + + // Get nn tensor tensor tensor num. + int32_t GetTensorTensorNum() const + { + return m_nextNnTensorIndex; + } + + // Given a TFLite index returns a TFLite type to which a tensor must be + // converted during copying the data to the memory allocated for NN API. + // kTfLiteNoType means no conversion is needed. + TfLiteType GetEqualLiteTypeFromLiteIndex(int32_t index) const + { + const int64_t maxSize = m_indexToTypeConversion.size(); + if (index >= 0 && index < maxSize) + return m_indexToTypeConversion[index]; + else + return kTfLiteNoType; + } + + // Add a new mapping from TFLite index to a type conversion. 
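+ // For example (a sketch drawn from ConvertType further below, not a separate API): when a
+ // float16 scalar tensor has to be fed to NNRT as float32, ConvertType calls
+ // AddTypeConversion(tensorIndex, kTfLiteFloat32), and GetEqualLiteTypeFromLiteIndex(tensorIndex)
+ // afterwards returns kTfLiteFloat32 so the data copy knows which conversion to apply.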
+ void AddTypeConversion(int32_t tfliteIndex, TfLiteType tfliteType) + { + const int64_t currentSize = m_indexToTypeConversion.size(); + if (tfliteIndex >= currentSize) { + m_indexToTypeConversion.resize(tfliteIndex + 1, kTfLiteNoType); + } + m_indexToTypeConversion[tfliteIndex] = tfliteType; + } + + // Convert TFLite tensor quant params to NNRT tensor quant params + TfLiteStatus ConvertQuantParams(TfLiteContext* context, int32_t tensorIndex, OH_NN_QuantParam& quantParam) + { + TfLiteTensor* tensor = &(context->tensors[tensorIndex]); + TfLiteType tfType = tensor->type; + if ((tfType != kTfLiteFloat32) && (tfType != kTfLiteFloat16) && (tfType != kTfLiteBool) && + (tfType != kTfLiteInt32) && (tfType != kTfLiteUInt8) && (tfType != kTfLiteInt8)) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[TENSOR_MAPPING] type %s is not supported.", TfLiteTypeGetName(tensor->type)); + return kTfLiteError; + } + + if (tensor->quantization.type) { + TfLiteAffineQuantization* params = reinterpret_cast(tensor->quantization.params); + int number = params->scale->size; + std::vector scale; + for (int i = 0; i < number; ++i) { + scale.emplace_back(static_cast(params->scale->data[i])); + } + m_scale.emplace_back(scale); + quantParam.scale = m_scale.back().data(); + quantParam.zeroPoint = params->zero_point->data; + quantParam.quantCount = number; + m_numBits.emplace_back(number, QUANT_NUMBITS); + quantParam.numBits = m_numBits.back().data(); + } else { + quantParam.quantCount = 0; + } + + return kTfLiteOk; + } + + // Convert TFLite tensor type to NNRT tensor type + TfLiteStatus ConvertType(TfLiteContext* context, int32_t tensorIndex, int32_t tensorFlags, OH_NN_DataType& nnType) + { + const bool scalarAsTensor = tensorFlags & NN_TENSOR_FLAG_SCALAR_AS_TENSOR; + TfLiteTensor* tensor = &(context->tensors[tensorIndex]); + TfLiteType nnTypeEquivalent = GetEqualLiteTypeFromLiteIndex(tensorIndex); + if (tensor->type == kTfLiteFloat32) { + nnType = OH_NN_FLOAT32; + } else if (tensor->type == kTfLiteFloat16) { + nnType = OH_NN_FLOAT16; + if (scalarAsTensor) { + nnType = OH_NN_FLOAT32; + AddTypeConversion(tensorIndex, kTfLiteFloat32); + } + } else if (tensor->type == kTfLiteInt32) { + nnType = OH_NN_INT32; + } else if (tensor->type == kTfLiteBool) { + nnType = OH_NN_INT8; + } else if (tensor->type == kTfLiteUInt8) { + nnType = (nnTypeEquivalent == kTfLiteInt32) ? OH_NN_INT32 : OH_NN_INT8; + } else if (tensor->type == kTfLiteInt8) { + nnType = (nnTypeEquivalent == kTfLiteInt32) ? OH_NN_INT32 : OH_NN_UINT8; + } else { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[TENSOR_MAPPING] type %s is not supported.", TfLiteTypeGetName(tensor->type)); + return kTfLiteError; + } + + return kTfLiteOk; + } + +private: + // Next index of nnrt tensor + int32_t m_nextNnTensorIndex = 0; + + // Mapping from lite index. Use a std::vector for speed and code size + // rather than a map. + std::vector m_liteTensorToNnTensor; + + // Mapping from lite index to a type which tensor must be converted to during + // the copying of the data to the memory allocated for NN API. kTfLiteNoType + // means no conversion is needed. Use an std::vector for speed and code size + // rather than a map. 
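+ // Worked example (illustrative only): starting from an empty vector,
+ // AddTypeConversion(3, kTfLiteInt32) resizes it to four entries; indices 0-2 stay
+ // kTfLiteNoType (no conversion) and index 3 becomes kTfLiteInt32.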
+ std::vector m_indexToTypeConversion; + + std::vector> m_numBits; + + std::vector> m_scale; +}; +} // namespace nnrt +} // namespace delegate +} // namespace tflite + +#endif // TENSORFLOW_LITE_DELEGATES_NNRT_TENSOR_MAPPING_H \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/label_classify/CMakeLists.txt b/example/deep_learning_framework/tflite/label_classify/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..a4097480f322d5dfe33b472fc39e78c65149d0fb --- /dev/null +++ b/example/deep_learning_framework/tflite/label_classify/CMakeLists.txt @@ -0,0 +1,39 @@ +# +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Header path +set(OHOS_INC ${LOCAL_DIRECTORY_PATH}/../../interfaces/kits/c) +set(TOOLS_INC ${LOCAL_DIRECTORY_PATH}/tflite/tools) +set(TFLITE_INC ${LOCAL_DIRECTORY_PATH}/lib_3rd_nnrt_tflite/include) +set(TFLITE_FLATBUFFER_INC ${LOCAL_DIRECTORY_PATH}/lib_3rd_nnrt_tflite/include/tensorflow/lite) +include_directories(${NNRT_DEMO_HOME} ${TFLITE_INC} ${OHOS_INC} ${TOOLS_INC} ${TFLITE_FLATBUFFER_INC} ${LOCAL_DIRECTORY_PATH}) + +# Scr path +aux_source_directory(${NNRT_DEMO_HOME} NNRT_DEMO_SRCS) +file(GLOB TOOLS_SRCS "${TOOLS_INC}/*.cpp") + +LINK_DIRECTORIES(${TFLITE_LIB_PATH}/com/arm64-v8a/lib/) +add_executable(label_classify ${NNRT_DEMO_SRCS} ${TOOLS_SRCS}) + +set(CMAKE_SHARED_LINKER_FLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") +target_link_libraries(label_classify ${LOCAL_DIRECTORY_PATH}/lib/libnnrt_implementation.so) +target_link_libraries(label_classify ${LOCAL_DIRECTORY_PATH}/lib/libnnrt_delegate.so) +target_link_libraries(label_classify -ltensorflow-lite) + +set (EXECUTABLE_OUTPUT_PATH ${LOCAL_DIRECTORY_PATH}/output) + + + + diff --git a/example/deep_learning_framework/tflite/label_classify/label_classify.cpp b/example/deep_learning_framework/tflite/label_classify/label_classify.cpp new file mode 100644 index 0000000000000000000000000000000000000000..18b8506db90b05d0e65282d1a056b93283010999 --- /dev/null +++ b/example/deep_learning_framework/tflite/label_classify/label_classify.cpp @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "label_classify.h" + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tensorflow/lite/kernels/register.h" +#include "tensorflow/lite/optional_debug_tools.h" +#include "tensorflow/lite/string_util.h" +#include "tensorflow/lite/tools/command_line_flags.h" +#include "tensorflow/lite/tools/delegates/delegate_provider.h" + +#include "log.h" +#include "utils.h" + +namespace tflite { +namespace label_classify { +using TfLiteDelegatePtr = tflite::Interpreter::TfLiteDelegatePtr; +using ProvidedDelegateList = tflite::tools::ProvidedDelegateList; +constexpr int BASE_NUMBER = 10; +constexpr int CONVERSION_RATE = 1000; +static struct option LONG_OPTIONS[] = { + {"help", no_argument, nullptr, 'h'}, + {"use_nnrt", required_argument, nullptr, 'a'}, + {"count", required_argument, nullptr, 'c'}, + {"image", required_argument, nullptr, 'i'}, + {"labels", required_argument, nullptr, 'l'}, + {"tflite_model", required_argument, nullptr, 'm'}, + {"num_results", required_argument, nullptr, 'n'}, + {"input_mean", required_argument, nullptr, 'b'}, + {"input_std", required_argument, nullptr, 's'}, + {"verbose", required_argument, nullptr, 'v'}, + {"warmup_nums", required_argument, nullptr, 'w'}, + {"print_result", required_argument, nullptr, 'z'}, + {"input_shape", required_argument, nullptr, 'p'}, + {nullptr, 0, nullptr, 0}, +}; + +class DelegateProviders { +public: + DelegateProviders() : m_delegateListUtil(¶ms) + { + m_delegateListUtil.AddAllDelegateParams(); // Add all registered delegate params to the contained 'params_'. + } + + ~DelegateProviders() {} + + bool InitFromCmdlineArgs(int32_t* argc, const char** argv) + { + std::vector flags; + m_delegateListUtil.AppendCmdlineFlags(&flags); + + const bool parseResult = Flags::Parse(argc, argv, flags); + if (!parseResult) { + std::string usage = Flags::Usage(argv[0], flags); + LOG(ERROR) << usage; + } + return parseResult; + } + + void MergeSettingsIntoParams(const Settings& settings) + { + if (settings.accel) { + if (!params.HasParam("use_nnrt")) { + LOG(WARN) << "NNRT deleate execution provider isn't linked or NNRT " + << "delegate isn't supported on the platform!"; + } else { + params.Set("use_nnrt", true); + } + } + } + + std::vector CreateAllDelegates() const + { + return m_delegateListUtil.CreateAllRankedDelegates(); + } + +private: + // Contain delegate-related parameters that are initialized from command-line flags. + tflite::tools::ToolParams params; + + // A helper to create TfLite delegates. 
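+ // Typical use of this helper in the demo (a sketch of the flow implemented below, for
+ // orientation only):
+ //     DelegateProviders providers;
+ //     providers.InitFromCmdlineArgs(&argc, argv);   // parse delegate-related flags
+ //     providers.MergeSettingsIntoParams(settings);  // e.g. turn on "use_nnrt"
+ //     for (auto& delegate : providers.CreateAllDelegates())
+ //         interpreter->ModifyGraphWithDelegate(std::move(delegate.delegate));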
+ ProvidedDelegateList m_delegateListUtil; +}; + +void PrepareModel(Settings& settings, std::unique_ptr& interpreter, + DelegateProviders& delegateProviders) +{ + const std::vector inputs = interpreter->inputs(); + const std::vector outputs = interpreter->outputs(); + + if (settings.verbose) { + LOG(INFO) << "number of inputs: " << inputs.size(); + LOG(INFO) << "number of outputs: " << outputs.size(); + } + + std::map> neededInputShapes; + if (settings.inputShape != "") { + if (FilterDynamicInputs(settings, interpreter, neededInputShapes) != kTfLiteOk) { + return; + } + } + + delegateProviders.MergeSettingsIntoParams(settings); + auto delegates = delegateProviders.CreateAllDelegates(); + + for (auto& delegate : delegates) { + const auto delegateName = delegate.provider->GetName(); + if (interpreter->ModifyGraphWithDelegate(std::move(delegate.delegate)) != kTfLiteOk) { + LOG(ERROR) << "Failed to apply " << delegateName << " delegate."; + return; + } else { + LOG(INFO) << "Applied " << delegateName << " delegate."; + } + } + + if (settings.inputShape != "") { + for (const auto& inputShape : neededInputShapes) { + if (IsEqualShape(inputShape.first, inputShape.second, interpreter)) { + LOG(WARNING) << "The input shape is same as the model shape, not resize."; + continue; + } + if (interpreter->ResizeInputTensor(inputShape.first, inputShape.second) != kTfLiteOk) { + LOG(ERROR) << "Fail to resize index " << inputShape.first << "."; + return; + } else { + LOG(INFO) << "Susccess to resize index " << inputShape.first << "."; + } + } + } + + if (interpreter->AllocateTensors() != kTfLiteOk) { + LOG(ERROR) << "Failed to allocate tensors!"; + return; + } + + if (settings.verbose) { + PrintInterpreterState(interpreter.get()); + } +} + +void LogInterpreterParams(Settings& settings, std::unique_ptr& interpreter) +{ + if (!interpreter) { + LOG(ERROR) << "Failed to construct interpreter"; + return; + } + + if (settings.verbose) { + LOG(INFO) << "tensors size: " << interpreter->tensors_size(); + LOG(INFO) << "nodes size: " << interpreter->nodes_size(); + LOG(INFO) << "inputs: " << interpreter->inputs().size(); + LOG(INFO) << "input(0) name: " << interpreter->GetInputName(0); + + size_t tSize = interpreter->tensors_size(); + for (size_t i = 0; i < tSize; ++i) { + if (interpreter->tensor(i)->name) { + LOG(INFO) << i << ": " << interpreter->tensor(i)->name << ", " << interpreter->tensor(i)->bytes << + ", " << interpreter->tensor(i)->type << ", " << interpreter->tensor(i)->params.scale << ", " << + interpreter->tensor(i)->params.zero_point; + } + } + } +} + +void InferenceModel(Settings& settings, DelegateProviders& delegateProviders) +{ + if (!settings.modelName.c_str()) { + LOG(ERROR) << "no model file name"; + return; + } + std::unique_ptr model; + std::unique_ptr interpreter; + model = tflite::FlatBufferModel::BuildFromFile(settings.modelName.c_str()); + if (!model) { + LOG(ERROR) << "Failed to mmap model " << settings.modelName; + return; + } + + settings.model = model.get(); + model->error_reporter(); + tflite::ops::builtin::BuiltinOpResolver resolver; + tflite::InterpreterBuilder(*model, resolver)(&interpreter); + if (!interpreter) { + LOG(ERROR) << "Failed to construct interpreter, please check the model."; + return; + } + + LogInterpreterParams(settings, interpreter); + + // set settings input type + PrepareModel(settings, interpreter, delegateProviders); + std::vector imageSize { 224, 224, 3}; + ImportData(settings, imageSize, interpreter); + + if (settings.loopCount > 0 && settings.numberOfWarmupRuns 
> 0) { + LOG(INFO) << "Warm-up for " << settings.numberOfWarmupRuns << " times"; + for (int32_t i = 0; i < settings.numberOfWarmupRuns; ++i) { + if (interpreter->Invoke() != kTfLiteOk) { + LOG(ERROR) << "Failed to invoke tflite!"; + return; + } + } + } + + struct timeval startTime, stopTime; + LOG(INFO) << "Invoke for " << settings.loopCount << " times"; + gettimeofday(&startTime, nullptr); + for (int32_t i = 0; i < settings.loopCount; ++i) { + if (interpreter->Invoke() != kTfLiteOk) { + LOG(ERROR) << "Failed to invoke tflite!"; + return; + } + } + + gettimeofday(&stopTime, nullptr); + LOG(INFO) << "invoked, average time: " << + (GetUs(stopTime) - GetUs(startTime)) / (settings.loopCount * CONVERSION_RATE) << " ms"; + AnalysisResults(settings, interpreter); +} + +void DisplayUsage() +{ + LOG(INFO) << "label_classify\n" + << "\t--help, -h: show the usage of the demo\n" + << "\t--use_nnrt, -a: [0|1], use NNRT or not\n" + << "\t--input_mean, -b: input mean\n" + << "\t--count, -c: loop interpreter->Invoke() for certain times\n" + << "\t--image, -i: image_name.bmp\n" + << "\t--labels, -l: labels for the model\n" + << "\t--tflite_model, -m: modelName.tflite\n" + << "\t--num_results, -n: number of results to show\n" + << "\t--input_std, -s: input standard deviation\n" + << "\t--verbose, -v: [0|1] print more information\n" + << "\t--warmup_nums, -w: number of warmup runs\n" + << "\t--print_result, -z: flag to print results\n" + << "\t--input_shape, -p: Indicates the specified dynamic input node and the corresponding shape.\n"; +} + +void InitSettings(int32_t argc, char** argv, Settings& settings) +{ + // getopt_long stores the option index here. + int32_t optionIndex = 0; + while ((optionIndex = getopt_long(argc, argv, "a:b:c:h:i:l:m:n:p:s:v:w:z:", LONG_OPTIONS, nullptr)) != -1) { + switch (optionIndex) { + case 'a': + settings.accel = strtol(optarg, nullptr, BASE_NUMBER); + break; + case 'b': + settings.inputMean = strtod(optarg, nullptr); + break; + case 'c': + settings.loopCount = strtol(optarg, nullptr, BASE_NUMBER); + break; + case 'i': + settings.inputBmpName = optarg; + break; + case 'l': + settings.labelsFileName = optarg; + break; + case 'm': + settings.modelName = optarg; + break; + case 'n': + settings.numberOfResults = strtol(optarg, nullptr, BASE_NUMBER); + break; + case 'p': + settings.inputShape = optarg; + break; + case 's': + settings.inputStd = strtod(optarg, nullptr); + break; + case 'v': + settings.verbose = strtol(optarg, nullptr, BASE_NUMBER); + break; + case 'w': + settings.numberOfWarmupRuns = strtol(optarg, nullptr, BASE_NUMBER); + break; + case 'z': + settings.printResult = strtol(optarg, nullptr, BASE_NUMBER); + break; + case 'h': + case '?': + // getopt_long already printed an error message. 
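+ // An illustrative invocation using the default file names from label_classify.h:
+ //     ./label_classify -m mbv2.tflite -i grace_hopper.bmp -l labels.txt -a 1 -c 10 -w 3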
+ DisplayUsage(); + return; + default: + return; + } + } +} + +int32_t Main(int32_t argc, char** argv) +{ + DelegateProviders delegateProviders; + bool parseResult = delegateProviders.InitFromCmdlineArgs(&argc, const_cast(argv)); + if (!parseResult) { + return EXIT_FAILURE; + } + + Settings settings; + InitSettings(argc, argv, settings); + InferenceModel(settings, delegateProviders); + return 0; +} +} // namespace label_classify +} // namespace tflite + +int32_t main(int32_t argc, char** argv) +{ + return tflite::label_classify::Main(argc, argv); +} diff --git a/example/deep_learning_framework/tflite/label_classify/label_classify.h b/example/deep_learning_framework/tflite/label_classify/label_classify.h new file mode 100644 index 0000000000000000000000000000000000000000..e8c1c4a2454e6a945351cfa586ab8fe84b65e0ce --- /dev/null +++ b/example/deep_learning_framework/tflite/label_classify/label_classify.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_H +#define TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_H + +#include + +#include "tensorflow/lite/model_builder.h" +#include "tensorflow/lite/string_type.h" +#include "tensorflow/lite/c/c_api_types.h" + +namespace tflite { +namespace label_classify { +struct Settings { + tflite::FlatBufferModel* model; + bool verbose = false; + bool accel = false; + bool printResult = false; + TfLiteType inputType = kTfLiteFloat32; + int32_t loopCount = 1; + float inputMean = 127.5f; + float inputStd = 127.5f; + string modelName = "./mbv2.tflite"; + string inputBmpName = "./grace_hopper.bmp"; + string labelsFileName = "./labels.txt"; + string inputShape = ""; + int32_t numberOfThreads = 1; + int32_t numberOfResults = 5; + int32_t numberOfWarmupRuns = 0; +}; +} // namespace label_classify +} // namespace tflite + +#endif // TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_H diff --git a/example/deep_learning_framework/tflite/nnrt/CMakeLists.txt b/example/deep_learning_framework/tflite/nnrt/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..ba7fa6dfbd85d59c2c31a0d85181b5e38db381b0 --- /dev/null +++ b/example/deep_learning_framework/tflite/nnrt/CMakeLists.txt @@ -0,0 +1,26 @@ +# +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# Header path +set(OHOS_INC ${LOCAL_DIRECTORY_PATH}/../../interfaces/kits/c) +include_directories(${NNRT_INTERFACE_HOME} ${OHOS_INC}) + +# Scr path +file(GLOB NNRT_SRCS "${NNRT_INTERFACE_HOME}/*.cpp") + +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${LOCAL_DIRECTORY_PATH}/lib) +add_library(nnrt_implementation SHARED ${NNRT_SRCS}) + + diff --git a/example/deep_learning_framework/tflite/nnrt/nnrt_implementation.cpp b/example/deep_learning_framework/tflite/nnrt/nnrt_implementation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..136f9f1d9b3a0b15e53eb41da49ab5c552069e56 --- /dev/null +++ b/example/deep_learning_framework/tflite/nnrt/nnrt_implementation.cpp @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnrt_implementation.h" + +#include +#include +#include +#include + +#include +#include + +namespace tflite { +// These function parameters are guaranteed to be nullptr by the caller +template +void LoadFunction(void* handle, const char* name, T* nnrtFunction) +{ + if (name == nullptr) { + NNRT_LOG("nnrt error: the function %s does not exist.", name); + return; + } + + void* fn = dlsym(handle, name); + if (fn == nullptr) { + NNRT_LOG("nnrt error: unable to open function %s", name); + return; + } + + *nnrtFunction = reinterpret_cast(fn); + return; +} + +const NnrtApi LoadNnrt() +{ + NnrtApi nnrt; + nnrt.nnrtExists = false; + void* libNeuralNetworks = nullptr; + + // Assumes there can be multiple instances of NN API + static std::string nnrtLibraryName = "libneural_network_runtime.z.so"; + libNeuralNetworks = dlopen(nnrtLibraryName.c_str(), RTLD_LAZY | RTLD_NODELETE); + if (libNeuralNetworks == nullptr) { + NNRT_LOG("nnrt error: unable to open library %s", nnrtLibraryName.c_str()); + return nnrt; + } else { + nnrt.nnrtExists = true; + } + + // NNModel + LoadFunction(libNeuralNetworks, "OH_NNModel_Construct", &nnrt.OH_NNModel_Construct); + LoadFunction(libNeuralNetworks, "OH_NNModel_AddTensor", &nnrt.OH_NNModel_AddTensor); + LoadFunction(libNeuralNetworks, "OH_NNModel_SetTensorData", &nnrt.OH_NNModel_SetTensorData); + LoadFunction(libNeuralNetworks, "OH_NNModel_AddOperation", &nnrt.OH_NNModel_AddOperation); + LoadFunction(libNeuralNetworks, "OH_NNModel_SpecifyInputsAndOutputs", &nnrt.OH_NNModel_SpecifyInputsAndOutputs); + LoadFunction(libNeuralNetworks, "OH_NNModel_Finish", &nnrt.OH_NNModel_Finish); + LoadFunction(libNeuralNetworks, "OH_NNModel_Destroy", &nnrt.OH_NNModel_Destroy); + LoadFunction(libNeuralNetworks, "OH_NNModel_GetAvailableOperations", &nnrt.OH_NNModel_GetAvailableOperations); + + // NNCompilation + LoadFunction(libNeuralNetworks, "OH_NNCompilation_Construct", &nnrt.OH_NNCompilation_Construct); + LoadFunction(libNeuralNetworks, "OH_NNCompilation_SetDevice", &nnrt.OH_NNCompilation_SetDevice); + LoadFunction(libNeuralNetworks, "OH_NNCompilation_SetCache", &nnrt.OH_NNCompilation_SetCache); + LoadFunction(libNeuralNetworks, "OH_NNCompilation_SetPerformanceMode", 
&nnrt.OH_NNCompilation_SetPerformanceMode); + LoadFunction(libNeuralNetworks, "OH_NNCompilation_SetPriority", &nnrt.OH_NNCompilation_SetPriority); + LoadFunction(libNeuralNetworks, "OH_NNCompilation_EnableFloat16", &nnrt.OH_NNCompilation_EnableFloat16); + LoadFunction(libNeuralNetworks, "OH_NNCompilation_Build", &nnrt.OH_NNCompilation_Build); + LoadFunction(libNeuralNetworks, "OH_NNCompilation_Destroy", &nnrt.OH_NNCompilation_Destroy); + + // NNExecutor + LoadFunction(libNeuralNetworks, "OH_NNExecutor_Construct", &nnrt.OH_NNExecutor_Construct); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_SetInput", &nnrt.OH_NNExecutor_SetInput); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_SetOutput", &nnrt.OH_NNExecutor_SetOutput); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_GetOutputShape", &nnrt.OH_NNExecutor_GetOutputShape); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_Run", &nnrt.OH_NNExecutor_Run); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_AllocateInputMemory", &nnrt.OH_NNExecutor_AllocateInputMemory); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_AllocateOutputMemory", &nnrt.OH_NNExecutor_AllocateOutputMemory); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_DestroyInputMemory", &nnrt.OH_NNExecutor_DestroyInputMemory); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_DestroyOutputMemory", &nnrt.OH_NNExecutor_DestroyOutputMemory); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_SetInputWithMemory", &nnrt.OH_NNExecutor_SetInputWithMemory); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_SetOutputWithMemory", &nnrt.OH_NNExecutor_SetOutputWithMemory); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_Destroy", &nnrt.OH_NNExecutor_Destroy); + + // NNDevice + LoadFunction(libNeuralNetworks, "OH_NNDevice_GetAllDevicesID", &nnrt.OH_NNDevice_GetAllDevicesID); + LoadFunction(libNeuralNetworks, "OH_NNDevice_GetName", &nnrt.OH_NNDevice_GetName); + LoadFunction(libNeuralNetworks, "OH_NNDevice_GetType", &nnrt.OH_NNDevice_GetType); + + return nnrt; +} + +const NnrtApi* NnrtImplementation() +{ + static const NnrtApi nnrt = LoadNnrt(); + if (!nnrt.nnrtExists) { + return nullptr; + } + return &nnrt; +} + +} // namespace tflite diff --git a/example/deep_learning_framework/tflite/nnrt/nnrt_implementation.h b/example/deep_learning_framework/tflite/nnrt/nnrt_implementation.h new file mode 100644 index 0000000000000000000000000000000000000000..5a342cb706767219af03f918c9584dc9e2d89c89 --- /dev/null +++ b/example/deep_learning_framework/tflite/nnrt/nnrt_implementation.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_LITE_NNRT_IMPLEMENTATION_H +#define TENSORFLOW_LITE_NNRT_IMPLEMENTATION_H + +#include +#include +#include +#include +#include +#include + +#include "neural_network_runtime_type.h" + +namespace tflite { +#define NNRT_LOG(format, ...) fprintf(stderr, format "\n", __VA_ARGS__) + +struct NnrtApi { + // This indicates the availability of nnrt library. 
If it is false, it means that loading + // the nnrt library failed and tflite will not use nnrt to run the model, vice versa. + bool nnrtExists; + + // Create model interface + OH_NNModel* (*OH_NNModel_Construct)(void); + OH_NN_ReturnCode (*OH_NNModel_AddTensor)(OH_NNModel* model, const OH_NN_Tensor* nnTensor); + OH_NN_ReturnCode (*OH_NNModel_SetTensorData)(OH_NNModel* model, uint32_t index, const void* buffer, + size_t length); + OH_NN_ReturnCode (*OH_NNModel_AddOperation)(OH_NNModel* model, OH_NN_OperationType op, + const OH_NN_UInt32Array* paramIndices, const OH_NN_UInt32Array* inputIndices, + const OH_NN_UInt32Array* outputIndices); + OH_NN_ReturnCode (*OH_NNModel_SpecifyInputsAndOutputs)(OH_NNModel* model, const OH_NN_UInt32Array* inputIndices, + const OH_NN_UInt32Array* outputIndices); + OH_NN_ReturnCode (*OH_NNModel_Finish)(OH_NNModel* model); + void (*OH_NNModel_Destroy)(OH_NNModel** model); + OH_NN_ReturnCode (*OH_NNModel_GetAvailableOperations)(OH_NNModel* model, size_t deviceID, const bool** isSupported, + uint32_t* opCount); + // Compilation interface + OH_NNCompilation* (*OH_NNCompilation_Construct)(const OH_NNModel* model); + OH_NN_ReturnCode (*OH_NNCompilation_SetCache)(OH_NNCompilation* compilation, const char* cacheDir, + uint32_t version); + OH_NN_ReturnCode (*OH_NNCompilation_SetPerformanceMode)(OH_NNCompilation* compilation, + OH_NN_PerformanceMode performanceMode); + OH_NN_ReturnCode (*OH_NNCompilation_SetPriority)(OH_NNCompilation* compilation, OH_NN_Priority priority); + OH_NN_ReturnCode (*OH_NNCompilation_EnableFloat16)(OH_NNCompilation* compilation, bool enablefloat16); + OH_NN_ReturnCode (*OH_NNCompilation_SetDevice)(OH_NNCompilation* compilation, size_t deviceID); + OH_NN_ReturnCode (*OH_NNCompilation_Build)(OH_NNCompilation* compilation); + void (*OH_NNCompilation_Destroy)(OH_NNCompilation** compilation); + // Executor interface + OH_NNExecutor* (*OH_NNExecutor_Construct)(OH_NNCompilation* compilation); + OH_NN_ReturnCode (*OH_NNExecutor_SetInput)(OH_NNExecutor* executor, uint32_t inputIndex, + const OH_NN_Tensor* nnTensor, const void* buffer, size_t length); + OH_NN_ReturnCode (*OH_NNExecutor_SetOutput)(const OH_NNExecutor* executor, uint32_t outputIndex, void* buffer, + size_t length); + OH_NN_ReturnCode (*OH_NNExecutor_GetOutputShape)(const OH_NNExecutor* executor, uint32_t outputIndex, + const uint32_t** dimensions, uint32_t* dimensionCount); + OH_NN_ReturnCode (*OH_NNExecutor_Run)(OH_NNExecutor* executor); + OH_NN_Memory* (*OH_NNExecutor_AllocateInputMemory)(OH_NNExecutor* executor, uint32_t inputIndex, size_t length); + OH_NN_Memory* (*OH_NNExecutor_AllocateOutputMemory)(OH_NNExecutor* executor, uint32_t outputIndex, size_t length); + void (*OH_NNExecutor_DestroyOutputMemory)(OH_NNExecutor* executor, uint32_t outputIndex, OH_NN_Memory** memory); + void (*OH_NNExecutor_DestroyInputMemory)(OH_NNExecutor* executor, uint32_t inputIndex, OH_NN_Memory** memory); + OH_NN_ReturnCode (*OH_NNExecutor_SetInputWithMemory)(OH_NNExecutor* executor, uint32_t inputIndex, + const OH_NN_Tensor* nnTensor, const OH_NN_Memory* memory); + OH_NN_ReturnCode (*OH_NNExecutor_SetOutputWithMemory)(OH_NNExecutor* executor, uint32_t outputIndex, + const OH_NN_Memory* memory); + void (*OH_NNExecutor_Destroy)(OH_NNExecutor** executor); + // Device interface + OH_NN_ReturnCode (*OH_NNDevice_GetAllDevicesID)(const size_t** allDevicesID, uint32_t* deviceCount); + OH_NN_ReturnCode (*OH_NNDevice_GetName)(size_t deviceID, const char** name); + OH_NN_ReturnCode (*OH_NNDevice_GetType)(size_t 
deviceID, OH_NN_DeviceType* deviceType); +}; + +const NnrtApi* NnrtImplementation(); +} // namespace tflite + +#endif // TENSORFLOW_LITE_NNRT_IMPLEMENTATION_H \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/tools/bitmap_helpers.cpp b/example/deep_learning_framework/tflite/tools/bitmap_helpers.cpp new file mode 100644 index 0000000000000000000000000000000000000000..379ca94c0d245572bcd6ba997c02b9089ae88bbb --- /dev/null +++ b/example/deep_learning_framework/tflite/tools/bitmap_helpers.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "tflite/tools/bitmap_helpers.h" + +#include +#include + +#include "tflite/tools/log.h" + +namespace tflite { +namespace label_classify { +void DecodeBmp(const uint8_t* input, int32_t rowSize, ImageInfo imageInfo, bool topDown, std::vector& output) +{ + ColorChannelOffset colorChannelOffset = { BLUE_OFFSET, GREEN_OFFSET, ALPHA_OFFSET }; + for (int32_t i = 0; i < imageInfo.height; ++i) { + int32_t srcPos; + int32_t dstPos; + + for (int32_t j = 0; j < imageInfo.width; j++) { + if (!topDown) { + srcPos = ((imageInfo.height - 1 - i) * rowSize) + j * imageInfo.channels; + } else { + srcPos = i * rowSize + j * imageInfo.channels; + } + + dstPos = (i * imageInfo.width + j) * imageInfo.channels; + + switch (imageInfo.channels) { + case GRAYSCALE_DIM: + output[dstPos] = input[srcPos]; + break; + case BGR_DIM: + // BGR -> RGB + output[dstPos] = input[srcPos + colorChannelOffset.blueOffset]; + output[dstPos + colorChannelOffset.greenOffset] = input[srcPos + colorChannelOffset.greenOffset]; + output[dstPos + colorChannelOffset.blueOffset] = input[srcPos]; + break; + case BGRA_DIM: + // BGRA -> RGBA + output[dstPos] = input[srcPos + colorChannelOffset.blueOffset]; + output[dstPos + colorChannelOffset.greenOffset] = input[srcPos + colorChannelOffset.greenOffset]; + output[dstPos + colorChannelOffset.blueOffset] = input[srcPos]; + output[dstPos + colorChannelOffset.alphaOffset] = input[srcPos + colorChannelOffset.alphaOffset]; + break; + default: + LOG(FATAL) << "Unexpected number of channels: " << imageInfo.channels; + break; + } + } + } + return; +} + +void ReadBmp(const std::string& inputBmpName, ImageInfo& imageInfo, Settings* s, std::vector& inputImage) +{ + int32_t begin, end; + std::ifstream file(inputBmpName, std::ios::in | std::ios::binary); + if (!file) { + LOG(FATAL) << "input file " << inputBmpName << " not found"; + return; + } + + begin = file.tellg(); + file.seekg(0, std::ios::end); + end = file.tellg(); + size_t len = end - begin; + if (s->verbose) { + LOG(INFO) << "len: " << len; + } + + std::vector img_bytes(len); + BmpAddressOffset bmpAddressOffset = { HEADER_ADDRESS_OFFSET, WIDTH_ADDRESS_OFFSET, + HEIGHT_ADDRESS_OFFSET, BBP_ADDRESS_OFFSET }; + file.seekg(0, std::ios::beg); + file.read(reinterpret_cast(img_bytes.data()), len); + const int32_t headerSize = + *(reinterpret_cast(img_bytes.data() + bmpAddressOffset.headerAddressOffset)); + 
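+ // Standard BMP header layout assumed by the *_ADDRESS_OFFSET constants used here:
+ // byte 10 holds the offset to the pixel array (headerSize above), byte 18 the width,
+ // byte 22 the height (a negative value means top-down row order), and byte 28 the
+ // bits per pixel.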
imageInfo.width = *(reinterpret_cast(img_bytes.data() + bmpAddressOffset.widthAddressOffset)); + imageInfo.height = + abs(*(reinterpret_cast(img_bytes.data() + bmpAddressOffset.heightAddressOffset))); + const int32_t bpp = *(reinterpret_cast(img_bytes.data() + bmpAddressOffset.bbpAddressOffset)); + imageInfo.channels = bpp / BIT_TO_BYTE; + inputImage.resize(imageInfo.height * imageInfo.width * imageInfo.channels); + + if (s->verbose) { + LOG(INFO) << "width, height, channels: " << imageInfo.width << ", " << imageInfo.height << ", " + << imageInfo.channels; + } + + // there may be padding bytes when the width is not a multiple of 4 bytes. + // 8 * channels == bits per pixel + const int32_t rowSize = ((8 * imageInfo.channels * imageInfo.width + 31) >> 5) << 2; + + // if height is negative, data layout is top down. otherwise, it's bottom up. + bool topDown = (imageInfo.height < 0); + + // Decode image, allocating tensor once the image size is known. + const uint8_t* bmpPixels = &img_bytes[headerSize]; + DecodeBmp(bmpPixels, rowSize, imageInfo, topDown, inputImage); + return; +} +} // namespace label_classify +} // namespace tflite diff --git a/example/deep_learning_framework/tflite/tools/bitmap_helpers.h b/example/deep_learning_framework/tflite/tools/bitmap_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..f8a8123b053421e04d59fbb73b7420e1e3d96a22 --- /dev/null +++ b/example/deep_learning_framework/tflite/tools/bitmap_helpers.h @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_BITMAP_HELPERS_H +#define TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_BITMAP_HELPERS_H + +#include "tflite/label_classify/label_classify.h" +#include "tensorflow/lite/builtin_op_data.h" +#include "tensorflow/lite/interpreter.h" +#include "tensorflow/lite/kernels/register.h" +#include "tensorflow/lite/string_util.h" +#include "log.h" + + +namespace tflite { +namespace label_classify { +const int INPUT_NUMBER = 2; +const int OUPUT_NUMBER = 1; +const int INT8_OFFSET_NUMBER = 128; +const int BIT_TO_BYTE = 8; +const int BLUE_OFFSET = 2; +const int GREEN_OFFSET = 1; +const int ALPHA_OFFSET = 3; +const int HEADER_ADDRESS_OFFSET = 10; +const int WIDTH_ADDRESS_OFFSET = 18; +const int HEIGHT_ADDRESS_OFFSET = 22; +const int BBP_ADDRESS_OFFSET = 28; +enum ChannelDim : int { + GRAYSCALE_DIM = 1, + BGR_DIM = 3, + BGRA_DIM = 4 +}; + +struct BmpAddressOffset { + int headerAddressOffset = 0; + int widthAddressOffset = 0; + int heightAddressOffset = 0; + int bbpAddressOffset = 0; +}; + +struct ColorChannelOffset { + int blueOffset = 0; + int greenOffset = 0; + int alphaOffset = 0; +}; + +struct ImageInfo { + int32_t width = 0; + int32_t height = 0; + int32_t channels = 0; +}; + +void ReadBmp(const std::string& input_bmp_name, ImageInfo& imageInfo, Settings* s, std::vector& input); + +template +void Resize(T* out, uint8_t* in, ImageInfo inputImageInfo, ImageInfo wantedImageInfo, Settings* s) +{ + std::unique_ptr interpreter = std::make_unique(); + + int32_t baseIndex = 0; + int32_t outputIndex = 2; + + // two inputs: input and new_sizes + interpreter->AddTensors(INPUT_NUMBER, &baseIndex); + // one output + interpreter->AddTensors(OUPUT_NUMBER, &baseIndex); + // set input and output tensors + interpreter->SetInputs({ 0, 1 }); + interpreter->SetOutputs({ 2 }); + + // set parameters of tensors + TfLiteQuantizationParams quant; + interpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "input", + { 1, inputImageInfo.height, inputImageInfo.width, inputImageInfo.channels }, quant); + interpreter->SetTensorParametersReadWrite(1, kTfLiteInt32, "new_size", { 2 }, quant); + interpreter->SetTensorParametersReadWrite(outputIndex, kTfLiteFloat32, "output", + { 1, wantedImageInfo.height, wantedImageInfo.width, wantedImageInfo.channels }, quant); + + ops::builtin::BuiltinOpResolver resolver; + const TfLiteRegistration* resizeOp = resolver.FindOp(BuiltinOperator_RESIZE_BILINEAR, 1); + auto* params = reinterpret_cast(malloc(sizeof(TfLiteResizeBilinearParams))); + if (params == nullptr) { + LOG(ERROR) << "Malloc memory failed in BitmapHelperslmpl."; + return; + } + params->align_corners = false; + params->half_pixel_centers = false; + interpreter->AddNodeWithParameters({ 0, 1 }, { 2 }, nullptr, 0, params, resizeOp, nullptr); + interpreter->AllocateTensors(); + + // fill input image + // in[] are integers, cannot do memcpy() directly + auto input = interpreter->typed_tensor(0); + + for (int32_t i = 0; i < inputImageInfo.height * inputImageInfo.width * inputImageInfo.channels; ++i) { + input[i] = in[i]; + } + + // fill new_sizes + interpreter->typed_tensor(1)[0] = wantedImageInfo.height; + interpreter->typed_tensor(1)[1] = wantedImageInfo.width; + interpreter->Invoke(); + auto output = interpreter->typed_tensor(2); + for (int32_t i = 0; i < wantedImageInfo.height * wantedImageInfo.width * wantedImageInfo.channels; ++i) { + switch (s->inputType) { + case kTfLiteFloat32: + out[i] = (output[i] - s->inputMean) / s->inputStd; + break; + case kTfLiteInt8: + out[i] = 
static_cast(output[i] - INT8_OFFSET_NUMBER); + break; + case kTfLiteUInt8: + out[i] = static_cast(output[i]); + break; + default: + break; + } + } +} + +// explicit instantiation +template void Resize(float*, uint8_t*, ImageInfo, ImageInfo, Settings*); +template void Resize(int8_t*, uint8_t*, ImageInfo, ImageInfo, Settings*); +template void Resize(uint8_t*, uint8_t*, ImageInfo, ImageInfo, Settings*); +template void Resize(int64_t*, uint8_t*, ImageInfo, ImageInfo, Settings*); +} // namespace label_classify +} // namespace tflite + +#endif // TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_BITMAP_HELPERS_H diff --git a/example/deep_learning_framework/tflite/tools/get_topn.h b/example/deep_learning_framework/tflite/tools/get_topn.h new file mode 100644 index 0000000000000000000000000000000000000000..8e01f535b27530456c4d57f6b7281f29c0a5789d --- /dev/null +++ b/example/deep_learning_framework/tflite/tools/get_topn.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_GET_TOP_N_H +#define TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_GET_TOP_N_H + +#include +#include +#include + +#include "tensorflow/lite/c/common.h" + +namespace tflite { +namespace label_classify { +template +void GetTopN(T* prediction, int32_t predictionSize, size_t numResults, float threshold, + std::vector>* topResults, TfLiteType inputType) +{ + // Will contain top N results in ascending order. + std::priority_queue, std::vector>, + std::greater>> + topResultPQ; + + const long count = predictionSize; // NOLINT(runtime/int32_t) + float value = 0.0; + float intNormalizedFactor = 256.0; + float uintNormalizedFactor = 255.0; + uint32_t offsetNumber = 128; + + for (int32_t i = 0; i < count; ++i) { + switch (inputType) { + case kTfLiteFloat32: + value = prediction[i]; + break; + case kTfLiteInt8: + value = (prediction[i] + offsetNumber) / intNormalizedFactor; + break; + case kTfLiteUInt8: + value = prediction[i] / uintNormalizedFactor; + break; + default: + break; + } + + // Only add it if it beats the threshold and has a chance at being in the top N. + if (value < threshold) { + continue; + } + + topResultPQ.push(std::pair(value, i)); + + // If at capacity, kick the smallest value out. + if (topResultPQ.size() > numResults) { + topResultPQ.pop(); + } + } + + // Copy to output vector and reverse into descending order. 
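+ // Illustrative walk-through (comment only): with numResults = 2 and scores
+ // {0.1, 0.7, 0.3, 0.9}, the min-heap above ends up holding {0.7, 0.9} with 0.7 on top,
+ // so the loop below emits 0.7 then 0.9 and the reverse yields {0.9, 0.7}, i.e. the
+ // results in descending order of confidence.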
+ while (!topResultPQ.empty()) { + topResults->push_back(topResultPQ.top()); + topResultPQ.pop(); + } + + std::reverse(topResults->begin(), topResults->end()); +} + +// explicit instantiation so that we can use them otherwhere +template void GetTopN(float*, int32_t, size_t, float, std::vector>*, TfLiteType); +template void GetTopN(int8_t*, int32_t, size_t, float, std::vector>*, TfLiteType); +template void GetTopN(uint8_t*, int32_t, size_t, float, std::vector>*, TfLiteType); +template void GetTopN(int64_t*, int32_t, size_t, float, std::vector>*, TfLiteType); +} // namespace label_classify +} // namespace tflite + +#endif // TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_GET_TOP_N_H diff --git a/example/deep_learning_framework/tflite/tools/log.h b/example/deep_learning_framework/tflite/tools/log.h new file mode 100644 index 0000000000000000000000000000000000000000..06bcf72bd0d18dec5409e1392f83ef2020445c75 --- /dev/null +++ b/example/deep_learning_framework/tflite/tools/log.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_LOG_H +#define TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_LOG_H + +#include +#include + +namespace tflite { +namespace label_classify { +class Log { + std::stringstream stream_; + +public: + explicit Log(const char* severity) + { + stream_ << severity << ": "; + } + std::stringstream& Stream() + { + return stream_; + } + ~Log() + { + std::cerr << stream_.str() << std::endl; + } +}; + +#define LOG(severity) tflite::label_classify::Log(#severity).Stream() +} // namespace label_classify +} // namespace tflite + +#endif // TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_LOG_H diff --git a/example/deep_learning_framework/tflite/tools/utils.cpp b/example/deep_learning_framework/tflite/tools/utils.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ada22686608893407f1e7e09d6af736f89718c6f --- /dev/null +++ b/example/deep_learning_framework/tflite/tools/utils.cpp @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "utils.h" + +#include +#include +#include + +#include "tflite/tools/bitmap_helpers.h" +#include "tflite/tools/get_topn.h" +#include "tflite/tools/log.h" + +namespace tflite { +namespace label_classify { +constexpr int32_t DATA_PRINT_NUM = 1000; +constexpr int32_t DATA_EACHLINE_NUM = 1000; +constexpr int32_t SECOND_TO_MICROSECOND_RATIO = 1000000; +constexpr uint8_t WEIGHT_DIMENSION = 2; +constexpr uint8_t CHANNEL_DIMENSION = 3; + +double GetUs(struct timeval t) +{ + return (t.tv_sec * SECOND_TO_MICROSECOND_RATIO + t.tv_usec); +} + +TfLiteStatus ReadLabelsFile(const string& fileName, std::vector& result, size_t& foundLabelCount) +{ + std::ifstream file(fileName); + if (!file) { + LOG(ERROR) << "Labels file " << fileName << " not found"; + return kTfLiteError; + } + result.clear(); + string line; + while (std::getline(file, line)) { + result.push_back(line); + } + foundLabelCount = result.size(); + const int32_t padding = 16; + while (result.size() % padding) { + result.emplace_back(); + } + + return kTfLiteOk; +} + +void GetInputNameAndShape(string& inputShapeString, std::map>& userInputShapes) +{ + if (inputShapeString == "") { + return; + } + int pos = inputShapeString.find_last_of(":"); + string userInputName = inputShapeString.substr(0, pos); + + string dimString = inputShapeString.substr(pos + 1); + int dimPos = dimString.find(","); + std::vector inputDims; + while (dimPos != dimString.npos) { + inputDims.push_back(std::stoi(dimString.substr(0, dimPos))); + dimString = dimString.substr(dimPos + 1); + dimPos = dimString.find(","); + } + inputDims.push_back(std::stoi(dimString)); + userInputShapes.insert(std::map>::value_type(userInputName, inputDims)); +} + +TfLiteStatus FilterDynamicInputs(Settings& settings, std::unique_ptr& interpreter, + std::map>& neededInputShapes) +{ + std::vector inputIndexes = interpreter->inputs(); + std::map nameIndexs; + for (int i = 0; i < inputIndexes.size(); i++) { + LOG(INFO) << "input index: " << inputIndexes[i]; + nameIndexs.insert(std::map::value_type(interpreter->GetInputName(i), inputIndexes[i])); + } + + if (settings.inputShape.find(":") == settings.inputShape.npos) { + LOG(ERROR) << "The format of input shapes string is not supported."; + return kTfLiteError; + } + + // Get input names and shapes + std::map> userInputShapes; + string inputShapeString = settings.inputShape; + int pos = inputShapeString.find(";"); + while (pos != inputShapeString.npos) { + GetInputNameAndShape(inputShapeString, userInputShapes); + inputShapeString = inputShapeString.substr(pos + 1); + pos = inputShapeString.find(";"); + } + GetInputNameAndShape(inputShapeString, userInputShapes); + + for (const auto& inputShape : userInputShapes) { + string inputName = inputShape.first; + auto findName = nameIndexs.find(inputName); + if (findName == nameIndexs.end()) { + LOG(ERROR) << "The input name is error: " << inputShape.first << "."; + return kTfLiteError; + } else { + neededInputShapes.insert(std::map>::value_type(findName->second, inputShape.second)); + } + } + + return kTfLiteOk; +} + +template void PrintData(T* data, int32_t dataSize, int32_t printSize) +{ + if (printSize > dataSize) { + printSize = dataSize; + } + for (int32_t i = 0; i < printSize; ++i) { + std::cout << static_cast(*(data + i)) << "\t"; + } + std::cout << std::endl; +} + +void PrintResult(std::unique_ptr& interpreter) +{ + for (int32_t index = 0; index < interpreter->outputs().size(); ++index) { + int32_t output_index = interpreter->outputs()[index]; + TfLiteIntArray* outputsDims = 
interpreter->tensor(output_index)->dims; + int32_t dimSize = outputsDims->size; + int32_t outputTensorSize = 1; + for (int32_t i = 0; i < dimSize; ++i) { + outputTensorSize *= outputsDims->data[i]; + } + + TfLiteTensor* outputTensor = interpreter->tensor(output_index); + switch (outputTensor->type) { + case kTfLiteFloat32: + PrintData(interpreter->typed_output_tensor(index), outputTensorSize, DATA_PRINT_NUM); + break; + case kTfLiteInt32: + PrintData(interpreter->typed_output_tensor(index), outputTensorSize, DATA_PRINT_NUM); + break; + case kTfLiteUInt8: + PrintData(interpreter->typed_output_tensor(index), outputTensorSize, DATA_PRINT_NUM); + break; + case kTfLiteInt8: + PrintData(interpreter->typed_output_tensor(index), outputTensorSize, DATA_PRINT_NUM); + break; + default: + LOG(ERROR) << "Unsupportted tensor datatype: " << outputTensor->type << "!"; + return; + } + } +} + +void AnalysisResults(Settings& settings, std::unique_ptr& interpreter) +{ + const float threshold = 0.001f; + std::vector> topResults; + + if (settings.printResult) { + LOG(INFO) << "Outputs Data:"; + PrintResult(interpreter); + } + + int32_t output = interpreter->outputs()[0]; + TfLiteIntArray* outputDims = interpreter->tensor(output)->dims; + // assume output dims to be something like (1, 1, ... ,size) + auto outputSize = outputDims->data[outputDims->size - 1]; + + auto tfType = interpreter->tensor(output)->type; + switch (tfType) { + case kTfLiteFloat32: + GetTopN(interpreter->typed_output_tensor(0), outputSize, settings.numberOfResults, threshold, + &topResults, settings.inputType); + break; + case kTfLiteInt8: + GetTopN(interpreter->typed_output_tensor(0), outputSize, settings.numberOfResults, + threshold, &topResults, settings.inputType); + break; + case kTfLiteUInt8: + GetTopN(interpreter->typed_output_tensor(0), outputSize, settings.numberOfResults, + threshold, &topResults, settings.inputType); + break; + case kTfLiteInt64: + GetTopN(interpreter->typed_output_tensor(0), outputSize, settings.numberOfResults, + threshold, &topResults, settings.inputType); + break; + default: + LOG(ERROR) << "cannot handle output type " << tfType << " yet"; + return; + } + + std::vector labels; + size_t labelCount; + + if (ReadLabelsFile(settings.labelsFileName, labels, labelCount) != kTfLiteOk) { + return; + } + for (const auto& result : topResults) { + const float confidence = result.first; + const int32_t index = result.second; + LOG(INFO) << confidence << ": " << index << " " << labels[index]; + } +} + +void ImportData(Settings& settings, std::vector& imageSize, std::unique_ptr& interpreter) +{ + ImageInfo inputImageInfo = {imageSize[0], imageSize[1], imageSize[2]}; + std::vector in; + ReadBmp(settings.inputBmpName, inputImageInfo, &settings, in); + + int32_t input = interpreter->inputs()[0]; + if (settings.verbose) { + LOG(INFO) << "input: " << input; + } + + // get input dimension from the model. + TfLiteIntArray* dims = interpreter->tensor(input)->dims; + ImageInfo wantedimageInfo; + wantedimageInfo.height = dims->data[1]; + wantedimageInfo.width = dims->data[WEIGHT_DIMENSION]; + wantedimageInfo.channels = (dims->size > CHANNEL_DIMENSION) ? 
dims->data[CHANNEL_DIMENSION] : 1; + + settings.inputType = interpreter->tensor(input)->type; + switch (settings.inputType) { + case kTfLiteFloat32: + Resize(interpreter->typed_tensor(input), in.data(), inputImageInfo, wantedimageInfo, + &settings); + break; + case kTfLiteInt8: + Resize(interpreter->typed_tensor(input), in.data(), inputImageInfo, wantedimageInfo, + &settings); + break; + case kTfLiteUInt8: + Resize(interpreter->typed_tensor(input), in.data(), inputImageInfo, wantedimageInfo, + &settings); + break; + case kTfLiteInt64: + Resize(interpreter->typed_tensor(input), in.data(), inputImageInfo, wantedimageInfo, + &settings); + break; + default: + LOG(ERROR) << "cannot handle input type " << settings.inputType << " yet"; + return; + } +} + +bool IsEqualShape(int tensorIndex, const std::vector& dims, std::unique_ptr& interpreter) +{ + TfLiteTensor* tensor = interpreter->tensor(tensorIndex); + for (int i = 0; i < tensor->dims->size; ++i) { + if (tensor->dims->data[i] != dims[i]) { + return false; + } + } + return true; +} +} // namespace label_classify +} // namespace tflite \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/tools/utils.h b/example/deep_learning_framework/tflite/tools/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..f97a8831b75f80c9bb69bc6a662c0494107efc5e --- /dev/null +++ b/example/deep_learning_framework/tflite/tools/utils.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
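Editor's note: a short sketch of the input-shape string format consumed by `GetInputNameAndShape` and `FilterDynamicInputs` in utils.cpp above: `"<tensor name>:<dim>,<dim>,..."`, with `;` separating multiple tensors. The `std::map<std::string, std::vector<int>>` template arguments and the tensor name `"data"` are assumptions, since the angle-bracket contents are stripped in this patch text.

```cpp
// Sketch only: map/vector template arguments and the tensor name are assumed.
#include <map>
#include <string>
#include <vector>

#include "tflite/tools/utils.h"

void ParseShapeExample()
{
    std::map<std::string, std::vector<int>> shapes;
    std::string spec = "data:1,224,224,3";   // "<tensor name>:<comma-separated dims>"
    tflite::label_classify::GetInputNameAndShape(spec, shapes);
    // shapes["data"] now holds {1, 224, 224, 3}.
}
```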
+ */ + +#ifndef TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_UTILS_H +#define TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_UTILS_H + +#include "../label_classify/label_classify.h" + +#include "sys/time.h" + +#include "tensorflow/lite/model_builder.h" +#include "tensorflow/lite/string_type.h" +#include "tensorflow/lite/c/c_api_types.h" +#include "tensorflow/lite/interpreter.h" + +#include "neural_network_runtime.h" + +namespace tflite { +namespace label_classify { +double GetUs(struct timeval t); +TfLiteStatus ReadLabelsFile(const string& fileName, std::vector& result, size_t& foundLabelCount); +TfLiteStatus FilterDynamicInputs(Settings& settings, + std::unique_ptr& interpreter, std::map>& neededInputShapes); +bool IsEqualShape(int tensorIndex, const std::vector& dim, std::unique_ptr& interpreter); +void GetInputNameAndShape(string &inputShapeString, std::map>& userInputShapes); +void PrintResult(std::unique_ptr& interpreter); +void AnalysisResults(Settings& settings, std::unique_ptr& interpreter); +void ImportData(Settings& settings, std::vector& imageSize, std::unique_ptr& interpreter); +} // namespace label_classify +} // namespace tflite + +#endif // TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_UTILS_H diff --git a/example/drivers/README_zh.md b/example/drivers/README_zh.md new file mode 100644 index 0000000000000000000000000000000000000000..0848080cd6433a2736f5ba0980c84e4b13d4ccf8 --- /dev/null +++ b/example/drivers/README_zh.md @@ -0,0 +1,329 @@ +# NNRt开发指导 + +## NNRt开发概述 + +### 功能简介 + +神经网络运行时部件(NNRt)是跨设备的AI运行时框架,作为端侧推理框架和专用加速芯片的中间桥梁,为端侧推理框架提供了统一的Native接口,使能端侧推理框架在专有加速芯片上推理;为芯片厂商提供了统一的HDI接口,使能专有加速芯片接入OpenHarmony社区生态。 + +本文介绍芯片厂商如何在将专有加速芯片接入NNRt,接入OpenHarmony社区生态。 + +### 基本概念 +在开发前,开发者需要先了解以下概念,以便更好地理解全文内容: + +- NNRt:Neural Network Runtime,神经网络运行时,是本指导主要介绍的部件。 +- OHOS:OpenHarmony Operating System,开源鸿蒙操作系统。 +- HDI:Hardware Device Interface,硬件设备接口,是OHOS中系统组件与芯片组件通信的接口。 +- IDL: Interface Description Language,接口描述语言,是HDI接口的语言格式。 + +### 约束与限制 +- 系统版本:OpenHarmony 3.2及以上。 +- 开发环境:Ubuntu 18.04及以上。 +- 接入设备:OpenHarmony定义的标准设备。 + +### 运作机制 +NNRt通过HDI接口实现与设备芯片的对接,由HDI接口实现跨进程通信。 + +**图1** NNRt架构图 + +![架构图](./arch_diagram.png) + +整个架构主要分为三层,AI应用在应用层,AI推理框架和神经网络运行时在系统层,设备服务在芯片层。AI应用要在专用加速芯片上完成模型推理,需要经过AI推理框架和神经网络运行时才能调用到底层的芯片设备,而神经网络运行时就是负责适配底层各种芯片设备,它开放了标准统一的南向接口,众多的第三方芯片设备都可以通过HDI接口接入OHOS。 + +程序运行时,AI应用、AI推理框架、神经网络运行时都在同一个进程,底层设备服务在另一个进程,进程间是通过IPC的机制通信,神经网络运行时根据南向HDI接口实现了HDI Client,服务端也需要根据南向HDI接口实现HDI Service。 + +## NNRt开发指导 + +### 场景介绍 +下文以rk3568芯片为例,展示rk3568 CPU如何通过HDI接口接入NNRt,并完成AI模型推理。 + +### 开发流程 +适配操作的整体流程如下: + +**图2** NNRt适配流程 + +![开发流程](./dev_flow.png) + +### 开发步骤 +开发者具体可通过以下步骤在芯片侧对接NNRt: +1. 
开源社区下载OpenHarmony的代码,编译drivers_interface部件,生成HDI接口的头文件。 + - [下载源码](../get-code/sourcecode-acquire.md)。 + - 编译接口IDL文件。 + ```shell + ./build.sh --product-name rk3568 –ccache --target-cpu arm64 --build-target=drivers_interface_nnrt + ``` + + --target-cpu arm64:是64位编译选项,若编译32位,则不需添加--target-cpu arm64 + + 编译之后,可以在```out/rk3568/gen/drivers/interface/nnrt```目录下找到生成的头文件,默认生成C++头文件,若需要生成C头文件,则修改```drivers/interface/nnrt/v1_0/BUILD.gn```文件中的language。 + ```shell + language = "c" + ``` + + 生成头文件目录如下所示: + ```text + out/rk3568/gen/drivers/interface/nnrt + └── v1_0 + ├── drivers_interface_nnrt__libnnrt_proxy_1.0_external_deps_temp.json + ├── drivers_interface_nnrt__libnnrt_stub_1.0_external_deps_temp.json + ├── innrt_device.h # 设备接口头文件 + ├── iprepared_model.h # 编译AI模型对象头文件 + ├── libnnrt_proxy_1.0__notice.d + ├── libnnrt_stub_1.0__notice.d + ├── model_types.cpp # AI模型结构定义实现文件 + ├── model_types.h # AI模型结构定义头文件 + ├── nnrt_device_driver.cpp # 设备驱动实现参考样例 + ├── nnrt_device_proxy.cpp + ├── nnrt_device_proxy.h + ├── nnrt_device_service.cpp # 设备服务端实现参考样例 + ├── nnrt_device_service.h # 设备服务端头文件 + ├── nnrt_device_stub.cpp + ├── nnrt_device_stub.h + ├── nnrt_types.cpp # 数据类型定义实现文件 + ├── nnrt_types.h # 数据类型定义头文件 + ├── node_attr_types.cpp # AI模型算子属性定义实现文件 + ├── node_attr_types.h # AI模型算子属性定义 + ├── prepared_model_proxy.cpp + ├── prepared_model_proxy.h + ├── prepared_model_service.cpp # 编译AI模型对象服务端实现参考样例 + ├── prepared_model_service.h # 编译AI模型对象服务端头文件 + ├── prepared_model_stub.cpp + └── prepared_model_stub.h + ``` + +2. 实现HDI服务 + - 在drivers/peripheral目录下新建开发目录,用于HDI服务开发,开发目录结构如下所示。 + ```text + drivers/peripheral/nnrt + ├── BUILD.gn # 代码编译脚本文件 + ├── bundle.json + └── hdi_cpu_service # 自定义目录 + ├── BUILD.gn # 代码编译脚本文件 + ├── include + │   ├── nnrt_device_service.h # 设备服务端头文件 + │   ├── node_functions.h # 非必须,由具体实现决定 + │   ├── node_registry.h # 非必须,由具体实现决定 + │   └── prepared_model_service.h # 编译AI模型对象服务端头文件 + └── src + ├── nnrt_device_driver.cpp # 设备驱动实现文件 + ├── nnrt_device_service.cpp # 设备服务端实现文件 + ├── nnrt_device_stub.cpp # 非必须,由具体实现决定 + ├── node_attr_types.cpp # 非必须,由具体实现决定 + ├── node_functions.cpp # 非必须,由具体实现决定 + ├── node_registry.cpp # 非必须,由具体实现决定 + └── prepared_model_service.cpp # 编译AI模型对象服务端实现文件 + ``` + + - 实现设备驱动,无特殊需求可直接使用步骤1中生成的nnrt_device_driver.cpp文件,否则根据具体驱动开发。 + - 实现服务接口,主要实现nnrt_device_service.cpp和prepared_model_service.cpp文件,接口定义可以参考```drivers/interface/nnrt```。 + + - 编译驱动和服务实现为共享库。 + 在```drivers/peripheral/nnrt/hdi_cpu_service/```下新建```BUILD.gn```文件,对驱动入口和服务实现编译为共享库。 + + ```shell + import("//build/ohos.gni") + import("//drivers/hdf_core/adapter/uhdf2/uhdf.gni") + + ohos_shared_library("libnnrt_service_1.0") { + include_dirs = [] + sources = [ + "src/nnrt_device_service.cpp", + "src/prepared_model_service.cpp", + "src/node_registry.cpp", + "src/node_functions.cpp", + "src/node_attr_types.cpp" + ] + public_deps = [ "//drivers/interface/nnrt/v1_0:nnrt_idl_headers" ] + external_deps = [ + "hdf_core:libhdf_utils", + "hiviewdfx_hilog_native:libhilog", + "ipc:ipc_single", + "c_utils:utils", + ] + + install_images = [ chipset_base_dir ] + subsystem_name = "hdf" + part_name = "drivers_peripheral_nnrt" + } + + ohos_shared_library("libnnrt_driver") { + include_dirs = [] + sources = [ "src/nnr_device_driver.cpp" ] + deps = [ "//drivers/peripheral/nnrt/hdi_cpu_service:libnnrt_service_1.0" ] + + external_deps = [ + "hdf_core:libhdf_host", + "hdf_core:libhdf_ipc_adapter", + "hdf_core:libhdf_utils", + "hiviewdfx_hilog_native:libhilog", + "ipc:ipc_single", + "c_utils:utils", + ] + + install_images = [ chipset_base_dir ] + 
subsystem_name = "hdf" + part_name = "drivers_peripheral_nnrt" + } + + group("hdf_nnrt_service") { + deps = [ + ":libnnrt_driver", + ":libnnrt_service_1.0", + ] + } + ``` + + 将```group("hdf_nnrt_service")```添加到```drivers/peripheral/nnrt/BUILD.gn```文件中 + ```shell + if (defined(ohos_lite)) { + group("nnrt_entry") { + deps = [ ] + } + } else { + group("nnrt_entry") { + deps = [ + "./hdi_cpu_service:hdf_nnrt_service", + ] + } + } + ``` + + 新建```drivers/peripheral/nnrt/bundle.json```用于定义新增的```drivers_peripheral_nnrt```部件。 + ```json + { + "name": "drivers_peripheral_nnrt", + "description": "Neural network runtime device driver", + "version": "3.2", + "license": "Apache License 2.0", + "component": { + "name": "drivers_peripheral_nnrt", + "subsystem": "hdf", + "syscap": [""], + "adapter_system_type": ["standard"], + "rom": "1024KB", + "ram": "2048KB", + "deps": { + "components": [ + "ipc", + "hdf_core", + "hiviewdfx_hilog_native", + "c_utils" + ], + "third_part": [ + "bounds_checking_function" + ] + }, + "build": { + "sub_component": [ + "//drivers/peripheral/nnrt:nnrt_entry" + ], + "test": [ + ], + "inner_kits": [ + ] + } + } + } + ``` + +3. 声明HDI服务 + 在对应产品的uhdf hcs配置文件中声明用户态驱动与服务,本例中rk3568对应在```vendor/hihope/rk3568/hdf_config/uhdf/device_info.hcs```文件中新增如下配置: + ```text + nnrt :: host { + hostName = "nnrt_host"; + priority = 50; + uid = ""; + gid = ""; + caps = ["DAC_OVERRIDE", "DAC_READ_SEARCH"]; + nnrt_device :: device { + device0 :: deviceNode { + policy = 2; + priority = 100; + moduleName = "libnnrt_driver.z.so"; + serviceName = "nnrt_device_service"; + } + } + } + ``` + 注意:修改hcs文件后请删除out目录重新编译,才能生效。 + +4. 配置host进程用户和组 + 对于新增host进程的场景,需要新增配置对应进程的用户ID和组ID。 进程的用户ID在文件```base/startup/init/services/etc/passwd```中配置,进程的组ID在文件```base/startup/init/services/etc/group```中配置。 + ```text + # 在base/startup/init/services/etc/passwd新增 + nnrt_host:x:3311:3311:::/bin/false + + # 在base/startup/init/services/etc/group新增 + nnrt_host:x:3311: + ``` + 完成上述所有配置后,全量编译版本后应该可以观察到新增host进程启动,也可以通过hilog输出检索新增的服务名称nnrt_interface_service观察到服务发布成功 + +5. SELinux配置 + OHOS已经开启SELinux特性,需要对新增的进程和服务配置相应的SELinux规则,用于运行host进程启动访问某些资源、发布HDI服务。对于调用者来说,也需要配置SELinux规则运行获取和调用某个HDI服务。 + + 在```base/security/selinux/sepolicy/ohos_policy/drivers/adapter/vendor/type.te```文件中配置nnrt_host进程安全上下文 + ```text + # 新增 + type nnrt_host, hdfdomain, domain; + ``` + + 由于SeLinux是白名单访问的权限机制,需要根据实际权限需求配置,将服务启动起来之后,通过以下dmesg命令可能查看avc告警, + avc告警会给出缺少的权限,SeLinux的配置也可以参考[OpenHarmony SeLinux子系统的说明](https://gitee.com/openharmony/security_selinux/blob/master/README.md) + ```shell + hdc_std shell + dmesg | grep nnrt + ``` + + 新建nnrt_host.te配置文件,将权限配置到nnrt_host.te文件中 + ```shell + # 创建nnrt文件夹 + mkdir base/security/selinux/sepolicy/ohos_policy/drivers/peripheral/nnrt + + # 创建vendor文件夹 + mkdir base/security/selinux/sepolicy/ohos_policy/drivers/peripheral/nnrt/vendor + + # 创建nnrt_host.te文件 + touch base/security/selinux/sepolicy/ohos_policy/drivers/peripheral/nnrt/vendor/nnrt_host.te + ``` + + 然后再将所需的权限写入nnrt_host.te文件中,比如: + ```text + allow nnrt_host dev_hdf_kevent:chr_file { ioctl }; + allow nnrt_host hilog_param:file { read }; + allow nnrt_host sh:binder { transfer }; + allow nnrt_host dev_ashmem_file:chr_file { open }; + allow sh nnrt_host:fd { use }; + ``` + +6. 删除out目录编译整个系统 + ```shell + ./build.sh --product-name rk3568 –ccache --jobs=4 + ``` + + +### 调测验证 +服务开发完成后,可以使用XTS用例验证基本功能和兼容性,开发者可通过以下步骤进行验证: +1. 开源社区下载[OpenHarmony代码](https://gitee.com/openharmony/docs/blob/master/zh-cn/device-dev/get-code/sourcecode-acquire.md),相关用例在test/xts/hats/hdf/nnrt目录下。 +2. 
编译XTS用例。 +```shell +cd test/xts/hats +./build.sh suite=hats system_size=standard --product-name rk3568 +``` +编译好的测试用例会输出到out/rk3568/suites/hats/testcases/HatsHdfNnrtFunctionTest + + +3. 将测试用例push到设备上。 +```shell +# 将测试用例可执行文件推送到设备上,HatsHdfNnrtFunctionTest是测试用例可执行文件。 +hdc_std file send out/rk3568/suites/hats/testcases/HatsHdfNnrtFunctionTest /data/local/tmp/ + +# 给测试用例可执行文件加上权限。 +hdc_std shell "chmod +x /data/local/tmp/HatsHdfNnrtFunctionTest" + +# 执行测试用例 +hdc_std shell "/data/local/tmp/HatsHdfNnrtFunctionTest" +``` + +### 开发实例 +完整[Demo实例](xxx, Demo暂时还在黄区代码仓,超链接需等Demo开源后补充)可以参考社区实现。 + diff --git a/example/drivers/arch_diagram.png b/example/drivers/arch_diagram.png new file mode 100644 index 0000000000000000000000000000000000000000..2e4b44da94089ec9ebd22d3985ad8435b9cb9d2d Binary files /dev/null and b/example/drivers/arch_diagram.png differ diff --git a/example/drivers/dev_flow.png b/example/drivers/dev_flow.png new file mode 100644 index 0000000000000000000000000000000000000000..1e75ccb0abedf661e64b08c791831308cf84c02c Binary files /dev/null and b/example/drivers/dev_flow.png differ diff --git a/example/drivers/nnrt/BUILD.gn b/example/drivers/nnrt/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..28ca28bae4175fdbc5f5efc5da300d89c224fc8a --- /dev/null +++ b/example/drivers/nnrt/BUILD.gn @@ -0,0 +1,24 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if (defined(ohos_lite)) { + group("nnrt_entry") { + deps = [ ] + } +} else { + group("nnrt_entry") { + deps = [ + "./hdi_cpu_service:hdf_nnrt_service", + ] + } +} \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/BUILD.gn b/example/drivers/nnrt/hdi_cpu_service/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..e2f8ab4b1bf221e3d09ad4fb2bafc51dbd5a925e --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/BUILD.gn @@ -0,0 +1,90 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import("//build/ohos.gni") +import("//drivers/hdf_core/adapter/uhdf2/uhdf.gni") + +ohos_prebuilt_shared_library("mindspore_demo") { + source = "//drivers/peripheral/nnrt/mindspore/mindspore/libmindspore-lite.huawei.so" + + install_images = [chipset_base_dir] + subsystem_name = "hdf" + part_name = "drivers_peripheral_nnrt" +} + +ohos_shared_library("libnnrt_device_service_1.0") { + include_dirs = [ + "//drivers/peripheral/nnrt/hdi_cpu_service/include", + "//drivers/peripheral/nnrt/mindspore", + "//third_party/flatbuffers/include", + "//commonlibrary/c_utils/base/include" + ] + sources = [ + "src/nnrt_device_service.cpp", + "src/prepared_model_service.cpp", + "src/node_registry.cpp", + "src/node_functions.cpp", + "src/shared_buffer_parser.cpp", + "src/validation.cpp" + ] + + deps = [ + "//drivers/interface/nnrt/v1_0:nnrt_idl_headers", + "//drivers/interface/nnrt/v1_0:libnnrt_stub_1.0", + ":mindspore_demo" + ] + + external_deps = [ + "hdf_core:libhdf_utils", + "hiviewdfx_hilog_native:libhilog", + "ipc:ipc_single", + "c_utils:utils" + ] + + install_images = [ chipset_base_dir ] + subsystem_name = "hdf" + part_name = "drivers_peripheral_nnrt" +} + +ohos_shared_library("libnnrt_driver") { + include_dirs = [] + sources = [ + "src/nnrt_device_driver.cpp" + ] + deps = [ + "//drivers/peripheral/nnrt/hdi_cpu_service:libnnrt_device_service_1.0", + "//drivers/interface/nnrt/v1_0:libnnrt_stub_1.0" + ] + + external_deps = [ + "hdf_core:libhdf_host", + "hdf_core:libhdf_ipc_adapter", + "hdf_core:libhdf_utils", + "hiviewdfx_hilog_native:libhilog", + "ipc:ipc_single", + "c_utils:utils", + "hdf_core:libhdi" + ] + + install_images = [ chipset_base_dir ] + subsystem_name = "hdf" + part_name = "drivers_peripheral_nnrt" +} + +group("hdf_nnrt_service") { + deps = [ + ":mindspore_demo", + ":libnnrt_driver", + ":libnnrt_device_service_1.0", + ] +} \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/include/nnrt_device_service.h b/example/drivers/nnrt/hdi_cpu_service/include/nnrt_device_service.h new file mode 100644 index 0000000000000000000000000000000000000000..98ae288945c4e9205b9a87b8dcd21dda34d08dfc --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/include/nnrt_device_service.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_HDI_NNRT_V1_0_NNRTDEVICESERVICE_H +#define OHOS_HDI_NNRT_V1_0_NNRTDEVICESERVICE_H + +#include + +#include "v1_0/innrt_device.h" +#include "ashmem.h" +#include "include/api/model.h" + +#include "mindspore_schema/model_generated.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +class NnrtDeviceService : public INnrtDevice { +public: + NnrtDeviceService() = default; + virtual ~NnrtDeviceService(); + + int32_t GetDeviceName(std::string& name) override; + + int32_t GetVendorName(std::string& name) override; + + int32_t GetDeviceType(DeviceType& deviceType) override; + + int32_t GetDeviceStatus(DeviceStatus& status) override; + + int32_t GetSupportedOperation(const Model& model, std::vector& ops) override; + + int32_t IsFloat16PrecisionSupported(bool& isSupported) override; + + int32_t IsPerformanceModeSupported(bool& isSupported) override; + + int32_t IsPrioritySupported(bool& isSupported) override; + + int32_t IsDynamicInputSupported(bool& isSupported) override; + + int32_t PrepareModel(const Model& model, const ModelConfig& config, sptr& preparedModel) override; + + int32_t IsModelCacheSupported(bool& isSupported) override; + + int32_t PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, + sptr& preparedModel) override; + + int32_t AllocateBuffer(uint32_t length, SharedBuffer& buffer) override; + + int32_t ReleaseBuffer(const SharedBuffer& buffer) override; + +private: + int32_t ValidateModelConfig(const ModelConfig& config) const; + int32_t ValidateModel(const Model& model) const; + std::shared_ptr TransModelToGraph(const Model& model) const; + std::unique_ptr TransTensor(const Tensor& tensor) const; + std::unique_ptr TransNode(const Node& node) const; + std::unique_ptr TransSubGraph(const SubGraph& graph, const size_t numTensor) const; + std::shared_ptr TransModelConfig(const ModelConfig& config) const; + +private: + std::shared_ptr m_model {nullptr}; + std::unordered_map> m_ashmems; +}; +} // V1_0 +} // Nnrt +} // HDI +} // OHOS + +#endif // OHOS_HDI_NNRT_V1_0_NNRTDEVICESERVICE_H \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/include/node_functions.h b/example/drivers/nnrt/hdi_cpu_service/include/node_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..5e820b92dd9d77aa4431f12df6aa810592d8956a --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/include/node_functions.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
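Editor's note: a hedged sketch of how a framework-side caller might exercise the INnrtDevice methods that `NnrtDeviceService` above implements. That the generated proxy exposes a parameterless `Get()` is an assumption inferred from `INnrtDevice::Get(true)` in the driver code below; `QueryNnrtDevice` is a hypothetical helper, not part of this patch.

```cpp
// Sketch only: the Get() entry point and error handling are simplified.
#include <string>

#include "v1_0/innrt_device.h"
#include "v1_0/nnrt_types.h"

void QueryNnrtDevice()
{
    auto device = OHOS::HDI::Nnrt::V1_0::INnrtDevice::Get();
    if (device == nullptr) {
        return;
    }

    std::string name;
    device->GetDeviceName(name);            // this sample service reports "RK3568-CPU"

    OHOS::HDI::Nnrt::V1_0::SharedBuffer buffer;
    device->AllocateBuffer(1024, buffer);   // ashmem handed back as fd/offset/size
    device->ReleaseBuffer(buffer);
}
```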
+ */ + +#ifndef OHOS_HDI_NNR_NODE_FUNCTIONS_H +#define OHOS_HDI_NNR_NODE_FUNCTIONS_H + +#include + +#include "hdf_base.h" +#include "utils/hdf_log.h" + +#include "node_registry.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +template +int32_t ParsePrimitive(const std::vector& primitive, T& attr, + std::function parseFunc) +{ + if (primitive.empty()) { + HDF_LOGE("Primitive data is empty."); + return HDF_FAILURE; + } + + OHOS::MessageParcel parcelData; + bool ret = parcelData.WriteBuffer(primitive.data(), primitive.size()); + if (!ret) { + HDF_LOGE("Write data to MessageParcel failed."); + return HDF_FAILURE; + } + + ret = parseFunc(parcelData, attr); + if (!ret) { + HDF_LOGE("Unmarshalling data failed."); + return HDF_FAILURE; + } + return HDF_SUCCESS; +} + +PrimUniquePtr GetAddPrimitive(const std::vector& primitive); +PrimUniquePtr GetAvgPoolPrimitive(const std::vector& primitive); +PrimUniquePtr GetConcatPrimitive(const std::vector& primitive); +PrimUniquePtr GetConv2dPrimitive(const std::vector& primitive); +PrimUniquePtr GetFullConnectionPrimitive(const std::vector& primitive); +PrimUniquePtr GetMaxPoolFusionPrimitive(const std::vector& primitive); +PrimUniquePtr GetMatMulFusionPrimitive(const std::vector& primitive); +PrimUniquePtr GetSoftmaxPrimitive(const std::vector& primitive); +PrimUniquePtr GetReshapePrimitive(const std::vector& primitive); +PrimUniquePtr GetScaleFusionPrimitive(const std::vector& primitive); +PrimUniquePtr GetActivationPrimitive(const std::vector& primitive); +PrimUniquePtr GetQuantDTypeCastPrimitive(const std::vector& primitive); +PrimUniquePtr GetMulFusionPrimitive(const std::vector& primitive); +} // namespace V1_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS +#endif // OHOS_HDI_NNR_NODE_FUNCTIONS_H \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/include/node_registry.h b/example/drivers/nnrt/hdi_cpu_service/include/node_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..cc2083c83b55e7fd79c7216d88be310cd2d2cc32 --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/include/node_registry.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_HDI_NNR_NODE_REGISTRY_H +#define OHOS_HDI_NNR_NODE_REGISTRY_H + +#include +#include +#include + +#include "v1_0/nnrt_types.h" +#include "mindspore_schema/model_generated.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +using PrimUniquePtr = std::unique_ptr; +class NodeRegistry { +public: + struct Registrar { + Registrar() = delete; + Registrar(NodeType type, std::function&)> nodeFunc); + }; + +public: + static NodeRegistry& GetSingleton(); + std::function&)> GetNodeFunc(NodeType type) const; + bool IsNodeTypeExist(NodeType type) const; + +private: + NodeRegistry() {}; + NodeRegistry(const NodeRegistry&) = delete; + NodeRegistry& operator=(const NodeRegistry&) = delete; + +private: + std::unordered_map&)>> m_nodeRegs; +}; + +#define REGISTER_NODE(nodeName, nodeType, funcPtr) static NodeRegistry::Registrar g_##nodeName(nodeType, funcPtr) +} // namespace V1_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS +#endif // OHOS_HDI_NNR_NODE_REGISTRY_H \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/include/prepared_model_service.h b/example/drivers/nnrt/hdi_cpu_service/include/prepared_model_service.h new file mode 100644 index 0000000000000000000000000000000000000000..f4cb99e6c1d194bc6a124938df997f169b1e4ad2 --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/include/prepared_model_service.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
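Editor's note: a hedged sketch of the registration that node_registry.cpp (listed in the BUILD.gn sources but not shown in this patch) is expected to perform with the `REGISTER_NODE` macro above. `GetAddPrimitive` is declared in node_functions.h; the `NODE_TYPE_ADD_FUSION` enumerator name from v1_0/nnrt_types.h is an assumption.

```cpp
// Sketch only: the NodeType enumerator name is assumed.
#include "node_functions.h"
#include "node_registry.h"

namespace OHOS {
namespace HDI {
namespace Nnrt {
namespace V1_0 {
// A static Registrar constructed at load time records the parse function, so
// GetSupportedOperation() can report the op and TransNode() can later decode
// its attributes via NodeRegistry::GetNodeFunc().
REGISTER_NODE(AddFusion, NODE_TYPE_ADD_FUSION, GetAddPrimitive);
} // namespace V1_0
} // namespace Nnrt
} // namespace HDI
} // namespace OHOS
```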
+ */ + +#ifndef OHOS_HDI_NNR_V1_0_PREPAREDMODELSERVICE_H +#define OHOS_HDI_NNR_V1_0_PREPAREDMODELSERVICE_H + +#include "v1_0/iprepared_model.h" +#include "include/api/data_type.h" +#include "include/api/context.h" +#include "include/api/types.h" +#include "include/api/model.h" +#include "mindspore_schema/model_generated.h" +#include "ashmem.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +constexpr int DYNAMIC_SHAPE_FLAG = -1; +class PreparedModelService : public IPreparedModel { +public: + PreparedModelService() = default; + + virtual ~PreparedModelService(); + + explicit PreparedModelService(std::shared_ptr context); + + int32_t Compile(std::shared_ptr graph); + + int32_t Compile(const void* modelBuffer, size_t length); + + int32_t ExportModelCache(std::vector& modelCache) override; + + int32_t Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) override; + +private: + int32_t SetInputs(const std::vector& inputs); + int32_t SetOutputs(const std::vector& outputs); + int32_t GetMSInputsAndOutputs(); + int32_t CompareTensor(const IOTensor& tensor, const mindspore::MSTensor& msTensor); + sptr ParseBuffer(const SharedBuffer& buffer); + int32_t UpdateOutput(const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough); + void ResetInputAndOutput(); + +private: + std::shared_ptr m_graph {nullptr}; + std::shared_ptr m_context {nullptr}; + flatbuffers::FlatBufferBuilder m_builder; + std::shared_ptr m_model {nullptr}; + sptr m_cacheBuffer {nullptr}; + std::vector> m_inputAshmems; + std::vector m_inputs; + std::vector> m_outputAshmems; + std::vector m_outputs; + std::vector> m_inputDims; + bool m_isDynamicShape {false}; +}; +} // V1_0 +} // Nnrt +} // HDI +} // OHOS + +#endif // OHOS_HDI_NNR_V1_0_PREPAREDMODELSERVICE_H \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/include/shared_buffer_parser.h b/example/drivers/nnrt/hdi_cpu_service/include/shared_buffer_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..b3479caa6854c1fe0a2df0d17be20d66d1d4eec1 --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/include/shared_buffer_parser.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_HDI_NNR_V1_0_SHARED_BUFFER_PARSER_H +#define OHOS_HDI_NNR_V1_0_SHARED_BUFFER_PARSER_H + +#include "ashmem.h" +#include "v1_0/nnrt_types.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +namespace { +const int INVALID_FD = -1; +} + +class SharedBufferParser { +public: + SharedBufferParser() {}; + ~SharedBufferParser(); + + int32_t Init(const SharedBuffer& buffer); + int32_t Init(const std::string& name, int32_t size); + void* GetBufferPtr(); + SharedBuffer GetBuffer(); + +private: + SharedBuffer m_buffer; + sptr m_ashptr {nullptr}; + void* m_bufferAddr {nullptr}; +}; +} // V1_0 +} // Nnrt +} // HDI +} // OHOS +#endif // OHOS_HDI_NNR_V1_0_SHARED_BUFFER_PARSER_H \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/include/validation.h b/example/drivers/nnrt/hdi_cpu_service/include/validation.h new file mode 100644 index 0000000000000000000000000000000000000000..42cd84ba80c0755b4cd5002ef2087bcad50b7755 --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/include/validation.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef OHOS_HDI_NNRT_VALIDATION_H +#define OHOS_HDI_NNRT_VALIDATION_H + +#include "v1_0/nnrt_types.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +int32_t ValidatePerformanceMode(PerformanceMode mode); +int32_t ValidatePriority(Priority priority); +int32_t ValidateDataType(DataType dataType); +int32_t ValidateFormat(Format format); +} // namespace V1_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS +#endif // OHOS_HDI_NNRT_VALIDATION_H \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_driver.cpp b/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_driver.cpp new file mode 100644 index 0000000000000000000000000000000000000000..dfa9d23143beade893da842ba582d2ca59a5862d --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_driver.cpp @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
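Editor's note: a hedged sketch of the `SharedBufferParser` round trip used by the device service code below: a `SharedBuffer` (ashmem fd, offset, size) arrives over HDI, `Init()` maps the region, and `GetBufferPtr()` exposes it for reading. `ReadModelCache` is a hypothetical helper, not part of this patch.

```cpp
// Sketch only: mirrors how PrepareModelFromModelCache() below consumes a buffer.
#include <cstdint>

#include "hdf_base.h"
#include "shared_buffer_parser.h"

using namespace OHOS::HDI::Nnrt::V1_0;

int32_t ReadModelCache(const SharedBuffer& cache)
{
    SharedBufferParser parser;
    if (parser.Init(cache) != HDF_SUCCESS) {
        return HDF_ERR_INVALID_PARAM;    // fd missing or mmap failed
    }

    void* data = parser.GetBufferPtr();  // valid until the parser is destroyed
    (void)data;                          // e.g. handed to PreparedModelService::Compile()
    return HDF_SUCCESS;
}
```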
+ */ + +#include +#include +#include +#include +#include "v1_0/nnrt_device_stub.h" + +using namespace OHOS::HDI::Nnrt::V1_0; + +struct HdfNnrtDeviceHost { + struct IDeviceIoService ioService; + OHOS::sptr stub; +}; + +static int32_t NnrtDeviceDriverDispatch(struct HdfDeviceIoClient *client, int cmdId, struct HdfSBuf *data, + struct HdfSBuf *reply) +{ + auto *hdfNnrtDeviceHost = CONTAINER_OF(client->device->service, struct HdfNnrtDeviceHost, ioService); + + OHOS::MessageParcel *dataParcel = nullptr; + OHOS::MessageParcel *replyParcel = nullptr; + OHOS::MessageOption option; + + if (SbufToParcel(data, &dataParcel) != HDF_SUCCESS) { + HDF_LOGE("%{public}s:invalid data sbuf object to dispatch", __func__); + return HDF_ERR_INVALID_PARAM; + } + if (SbufToParcel(reply, &replyParcel) != HDF_SUCCESS) { + HDF_LOGE("%{public}s:invalid reply sbuf object to dispatch", __func__); + return HDF_ERR_INVALID_PARAM; + } + + return hdfNnrtDeviceHost->stub->SendRequest(cmdId, *dataParcel, *replyParcel, option); +} + +static int HdfNnrtDeviceDriverInit(struct HdfDeviceObject *deviceObject) +{ + HDF_LOGI("HdfNnrtDeviceDriverInit enter"); + return HDF_SUCCESS; +} + +static int HdfNnrtDeviceDriverBind(struct HdfDeviceObject *deviceObject) +{ + HDF_LOGI("HdfNnrtDeviceDriverBind enter"); + + auto *hdfNnrtDeviceHost = new (std::nothrow) HdfNnrtDeviceHost; + if (hdfNnrtDeviceHost == nullptr) { + HDF_LOGE("%{public}s: failed to create create HdfNnrtDeviceHost object", __func__); + return HDF_FAILURE; + } + + hdfNnrtDeviceHost->ioService.Dispatch = NnrtDeviceDriverDispatch; + hdfNnrtDeviceHost->ioService.Open = NULL; + hdfNnrtDeviceHost->ioService.Release = NULL; + + auto serviceImpl = INnrtDevice::Get(true); + if (serviceImpl == nullptr) { + HDF_LOGE("%{public}s: failed to get of implement service", __func__); + delete hdfNnrtDeviceHost; + return HDF_FAILURE; + } + + hdfNnrtDeviceHost->stub = OHOS::HDI::ObjectCollector::GetInstance().GetOrNewObject(serviceImpl, + INnrtDevice::GetDescriptor()); + if (hdfNnrtDeviceHost->stub == nullptr) { + HDF_LOGE("%{public}s: failed to get stub object", __func__); + delete hdfNnrtDeviceHost; + return HDF_FAILURE; + } + + deviceObject->service = &hdfNnrtDeviceHost->ioService; + return HDF_SUCCESS; +} + +static void HdfNnrtDeviceDriverRelease(struct HdfDeviceObject *deviceObject) +{ + HDF_LOGI("HdfNnrtDeviceDriverRelease enter"); + if (deviceObject->service == nullptr) { + HDF_LOGE("HdfNnrtDeviceDriverRelease not initted"); + return; + } + + auto *hdfNnrtDeviceHost = CONTAINER_OF(deviceObject->service, struct HdfNnrtDeviceHost, ioService); + delete hdfNnrtDeviceHost; +} + +struct HdfDriverEntry g_nnrtdeviceDriverEntry = { + .moduleVersion = 1, + .moduleName = "nnrt", + .Bind = HdfNnrtDeviceDriverBind, + .Init = HdfNnrtDeviceDriverInit, + .Release = HdfNnrtDeviceDriverRelease, +}; + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ +HDF_INIT(g_nnrtdeviceDriverEntry); +#ifdef __cplusplus +} +#endif /* __cplusplus */ \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_service.cpp b/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_service.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e3b43752011f647364572be3bbf522735cde1871 --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_service.cpp @@ -0,0 +1,436 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnrt_device_service.h" + +#include +#include "utils/hdf_log.h" +#include "ashmem.h" +#include "securec.h" + +#include "node_registry.h" +#include "prepared_model_service.h" +#include "shared_buffer_parser.h" +#include "validation.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +extern "C" INnrtDevice *NnrtDeviceImplGetInstance(void) +{ + return new (std::nothrow) NnrtDeviceService(); +} + +NnrtDeviceService::~NnrtDeviceService() +{ + for (auto ash : m_ashmems) { + ash.second->UnmapAshmem(); + ash.second->CloseAshmem(); + } +} + +int32_t NnrtDeviceService::GetDeviceName(std::string& name) +{ + name = "RK3568-CPU"; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::GetVendorName(std::string& name) +{ + name = "Rockchip"; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::GetDeviceType(DeviceType& deviceType) +{ + deviceType = DeviceType::CPU; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::GetDeviceStatus(DeviceStatus& status) +{ + status = DeviceStatus::AVAILABLE; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::GetSupportedOperation(const Model& model, std::vector& ops) +{ + size_t nodeSize = model.nodes.size(); + auto nodes = model.nodes; + ops.resize(nodeSize, false); + auto& regInstance = NodeRegistry::GetSingleton(); + for (size_t i = 0; i < nodeSize; i++) { + ops[i] = regInstance.IsNodeTypeExist(nodes[i].nodeType); + } + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::IsFloat16PrecisionSupported(bool& isSupported) +{ + isSupported = true; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::IsPerformanceModeSupported(bool& isSupported) +{ + isSupported = true; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::IsPrioritySupported(bool& isSupported) +{ + isSupported = false; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::IsDynamicInputSupported(bool& isSupported) +{ + isSupported = true; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::PrepareModel(const Model& model, const ModelConfig& config, + sptr& preparedModel) +{ + auto ret = ValidateModel(model); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Model is invalid."); + return ret; + } + + auto graph = TransModelToGraph(model); + if (graph == nullptr) { + HDF_LOGE("Transfrom model to graph failed."); + return HDF_ERR_INVALID_PARAM; + } + + ret = ValidateModelConfig(config); + if (ret != HDF_SUCCESS) { + HDF_LOGE("ModelConfig is invalid."); + return ret; + } + + auto context = TransModelConfig(config); + sptr service = new (std::nothrow) PreparedModelService(context); + if (service == nullptr) { + HDF_LOGE("Create new PreparedModelService instance failed."); + return HDF_ERR_MALLOC_FAIL; + } + + ret = service->Compile(graph); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Prepared model failed."); + return ret; + } + + preparedModel = service; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::IsModelCacheSupported(bool& isSupported) +{ + isSupported = true; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, sptr& preparedModel) +{ + 
HDF_LOGD("Using cache to prepare model."); + + // modelCache must be 1, because PreparedModel only export one cache file. + if (modelCache.size() != 1) { + HDF_LOGE("The size of modelCache vector is not valid, it should be one elememt in that vector."); + return HDF_ERR_INVALID_PARAM; + } + + SharedBufferParser parser; + auto ret = parser.Init(modelCache[0]); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse modle buffer failed."); + return HDF_ERR_INVALID_PARAM; + } + + void* modelBuffer = parser.GetBufferPtr(); + auto context = TransModelConfig(config); + sptr service = new (std::nothrow) PreparedModelService(context); + if (service == nullptr) { + HDF_LOGE("Create new instance PreparedModelService failed."); + return HDF_ERR_MALLOC_FAIL; + } + + ret = service->Compile(modelBuffer, modelCache[0].dataSize); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Prepared model failed."); + return ret; + } + + preparedModel = service; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::AllocateBuffer(uint32_t length, SharedBuffer& buffer) +{ + sptr ashptr = Ashmem::CreateAshmem("allocateBuffer", length); + if (ashptr == nullptr) { + HDF_LOGE("Create shared memory failed."); + return HDF_FAILURE; + } + + if (!ashptr->MapReadAndWriteAshmem()) { + HDF_LOGE("Map allocate buffer failed."); + return HDF_FAILURE; + } + + buffer.fd = ashptr->GetAshmemFd(); + buffer.bufferSize = ashptr->GetAshmemSize(); + buffer.offset = 0; + buffer.dataSize = length; + + m_ashmems[buffer.fd] = ashptr; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::ReleaseBuffer(const SharedBuffer& buffer) +{ + // parser will close current fd. + SharedBufferParser parser; + auto ret = parser.Init(buffer); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse buffer failed."); + return HDF_ERR_INVALID_PARAM; + } + + for (auto& ash : m_ashmems) { + ash.second->UnmapAshmem(); + ash.second->CloseAshmem(); + } + m_ashmems.clear(); + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::ValidateModelConfig(const ModelConfig& config) const +{ + if (!ValidatePerformanceMode(config.mode)) { + HDF_LOGE("PerformanceMode is invalid. mode=%d", config.mode); + return HDF_ERR_INVALID_PARAM; + } + + if (!ValidatePriority(config.priority)) { + HDF_LOGE("Priority is invalid. 
priority=%d", config.priority); + return HDF_ERR_INVALID_PARAM; + } + + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::ValidateModel(const Model& model) const +{ + if (model.allTensors.empty()) { + HDF_LOGE("Model has no tensors."); + return HDF_ERR_INVALID_PARAM; + } + + if (model.subGraph.empty()) { + HDF_LOGE("Model has no subGraphs."); + return HDF_ERR_INVALID_PARAM; + } + + if (model.nodes.empty()) { + HDF_LOGE("Model has no nodes."); + return HDF_ERR_INVALID_PARAM; + } + + if (model.inputIndex.empty()) { + HDF_LOGE("Model has no input."); + return HDF_ERR_INVALID_PARAM; + } + + if (model.outputIndex.empty()) { + HDF_LOGE("Model has no output."); + return HDF_ERR_INVALID_PARAM; + } + + size_t tensorSize = model.allTensors.size(); + for (auto index : model.inputIndex) { + if (index > tensorSize) { + HDF_LOGE("Input index is invalid, index=%u", index); + return HDF_ERR_INVALID_PARAM; + } + } + + for (auto index : model.outputIndex) { + if (index > tensorSize) { + HDF_LOGE("Output index is invalid, index=%u", index); + return HDF_ERR_INVALID_PARAM; + } + } + + return HDF_SUCCESS; +} + +std::shared_ptr NnrtDeviceService::TransModelToGraph(const Model& model) const +{ + auto metaGraph = std::make_shared(); + metaGraph->name = model.name; + metaGraph->version = mindspore::Version(); + + std::unique_ptr transTensor{nullptr}; + for (auto tensor : model.allTensors) { + transTensor = TransTensor(tensor); + if (transTensor == nullptr) { + HDF_LOGE("Transform tensor failed."); + return nullptr; + } + metaGraph->allTensors.emplace_back(std::move(transTensor)); + } + metaGraph->inputIndex = model.inputIndex; + metaGraph->outputIndex = model.outputIndex; + + // Transform node + std::unique_ptr transNode {nullptr}; + for (auto& node : model.nodes) { + transNode = TransNode(node); + if (transNode == nullptr) { + HDF_LOGE("Transform node failed, node name=%{public}s", node.name.c_str()); + return nullptr; + } + metaGraph->nodes.emplace_back(std::move(transNode)); + } + + // Transform subgraph + const size_t numTensor = model.allTensors.size(); + for (auto graph : model.subGraph) { + metaGraph->subGraph.emplace_back(TransSubGraph(graph, numTensor)); + } + return metaGraph; +} + +std::unique_ptr NnrtDeviceService::TransTensor(const Tensor& tensor) const +{ + if (!ValidateDataType(tensor.dataType)) { + HDF_LOGE("DataType of tensor is invalid. dataType=%d", tensor.dataType); + return nullptr; + } + + if (!ValidateFormat(tensor.format)) { + HDF_LOGE("Format of tensor is invalid. 
format=%d", tensor.format); + return nullptr; + } + + auto schemaTensor = std::make_unique(); + schemaTensor->name = tensor.name; + schemaTensor->dataType = static_cast(tensor.dataType); + schemaTensor->format = static_cast(tensor.format); + schemaTensor->dims = tensor.dims; + for (auto param : tensor.quantParams) { + auto quantParam = std::make_unique(); + quantParam->scale = param.scale; + quantParam->zeroPoint = param.zeroPoint; + quantParam->numBits = param.numBits; + quantParam->inited = true; + schemaTensor->quantParams.emplace_back(std::move(quantParam)); + } + + if (tensor.data.fd != INVALID_FD) { + SharedBufferParser parser; + auto ret = parser.Init(tensor.data); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse tensor data failed."); + return nullptr; + } + + auto data = parser.GetBufferPtr(); + schemaTensor->data.resize(tensor.data.dataSize); + auto memRet = memcpy_s(const_cast(schemaTensor->data.data()), + tensor.data.dataSize, data, tensor.data.dataSize); + if (memRet != EOK) { + HDF_LOGW("Copy tensor data failed."); + return nullptr; + } + } + return schemaTensor; +} + +std::unique_ptr NnrtDeviceService::TransNode(const Node& node) const +{ + auto cnode = std::make_unique(); + cnode->name = node.name; + cnode->inputIndex = node.inputIndex; + cnode->outputIndex = node.outputIndex; + cnode->quantType = static_cast(node.quantType); + + auto& regInstance = NodeRegistry::GetSingleton(); + auto parseFunc = regInstance.GetNodeFunc(node.nodeType); + auto primitive = parseFunc(node.nodeAttr); + if (primitive == nullptr) { + HDF_LOGE("Parse primitve data failed. node name=%{public}s", node.name.c_str()); + return nullptr; + } + + cnode->primitive = std::move(primitive); + return cnode; +} + +std::unique_ptr NnrtDeviceService::TransSubGraph(const SubGraph& graph, + const size_t numTensor) const +{ + auto subGraph = std::make_unique(); + subGraph->name = graph.name; + subGraph->inputIndices = graph.inputIndices; + subGraph->outputIndices = graph.outputIndices; + subGraph->nodeIndices = graph.nodeIndices; + subGraph->tensorIndices.reserve(numTensor); + for (auto i = 0; i < numTensor; i++) { + subGraph->tensorIndices.emplace_back(static_cast(i)); + } + return subGraph; +} + +std::shared_ptr NnrtDeviceService::TransModelConfig(const ModelConfig& config) const +{ + auto context = std::make_shared(); + const int cpuThreadNum = 2; + const int cpuNoAffinities = 0; + const int cpuBigCore = 1; + const int cpuLittleCore = 2; + context->SetThreadNum(cpuThreadNum); + + int mode = cpuNoAffinities; + switch (config.mode) { + case PerformanceMode::PERFORMANCE_LOW: + case PerformanceMode::PERFORMANCE_MEDIUM: + mode = cpuLittleCore; + break; + case PerformanceMode::PERFORMANCE_HIGH: + case PerformanceMode::PERFORMANCE_EXTREME: + mode = cpuBigCore; + break; + default: + mode = cpuNoAffinities; + } + context->SetThreadAffinity(mode); + + auto cpuInfo = std::make_shared(); + cpuInfo->SetEnableFP16(config.enableFloat16); + auto& deviceInfos = context->MutableDeviceInfo(); + deviceInfos.emplace_back(cpuInfo); + return context; +} +} // V1_0 +} // Nnrt +} // HDI +} // OHOS diff --git a/example/drivers/nnrt/hdi_cpu_service/src/node_functions.cpp b/example/drivers/nnrt/hdi_cpu_service/src/node_functions.cpp new file mode 100644 index 0000000000000000000000000000000000000000..51ca0ee466f32ad0116b45bba14f18584dba506e --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/src/node_functions.cpp @@ -0,0 +1,373 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "node_functions.h" + +#include "node_registry.h" +#include +#include + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +PrimUniquePtr GetAddPrimitive(const std::vector& primitive) +{ + AddFusion addAttr; + auto ret = ParsePrimitive(primitive, addAttr, AddFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of AddFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_AddFusion; + auto attr = new (std::nothrow) mindspore::schema::AddFusionT; + if (attr == nullptr) { + HDF_LOGE("Create AddFusion primitive failed."); + return nullptr; + } + attr->activation_type = static_cast(addAttr.activationType); + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetAvgPoolPrimitive(const std::vector& primitive) +{ + AvgPoolFusion avgPoolAttr; + auto ret = ParsePrimitive(primitive, avgPoolAttr, AvgPoolFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of AvgPoolFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_AvgPoolFusion; + + auto attr = new (std::nothrow) mindspore::schema::AvgPoolFusionT; + if (attr == nullptr) { + HDF_LOGE("Create AvgPoolFusion primitive failed."); + return nullptr; + } + attr->kernel_size = avgPoolAttr.kernelSize; + attr->strides = avgPoolAttr.strides; + attr->pad = avgPoolAttr.pad; + attr->pad_mode = static_cast(avgPoolAttr.padMode); + attr->round_mode = static_cast(avgPoolAttr.roundMode); + attr->format = static_cast(avgPoolAttr.format); + attr->global = avgPoolAttr.global; + attr->activation_type = static_cast(avgPoolAttr.activationType); + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetConcatPrimitive(const std::vector& primitive) +{ + Concat concatAttr; + auto ret = ParsePrimitive(primitive, concatAttr, ConcatBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Concat operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Concat; + + auto attr = new (std::nothrow) mindspore::schema::ConcatT; + if (attr == nullptr) { + HDF_LOGE("Create concat primitive failed."); + return nullptr; + } + attr->axis = concatAttr.axis; + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetConv2dPrimitive(const std::vector& primitive) +{ + Conv2DFusion conv2dAttr; + auto ret = ParsePrimitive(primitive, conv2dAttr, Conv2DFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Conv2DFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Conv2DFusion; + + auto attr = new (std::nothrow) mindspore::schema::Conv2DFusionT; + if (attr == nullptr) { + HDF_LOGE("Create Conv2DFusion primitive failed."); + return 
nullptr; + } + + attr->kernel_size = conv2dAttr.kernelSize; + attr->stride = conv2dAttr.stride; + attr->dilation = conv2dAttr.dilation; + attr->pad_mode = static_cast(conv2dAttr.padMode); + attr->pad_list = conv2dAttr.padList; + attr->group = conv2dAttr.group; + attr->in_channel = conv2dAttr.inChannel; + attr->out_channel = conv2dAttr.outChannel; + attr->activation_type = static_cast(conv2dAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetFullConnectionPrimitive(const std::vector& primitive) +{ + FullConnection fullConnAttr; + auto ret = ParsePrimitive(primitive, fullConnAttr, FullConnectionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of FullConnection operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_FullConnection; + + auto attr = new (std::nothrow) mindspore::schema::FullConnectionT; + if (attr == nullptr) { + HDF_LOGE("Create FullConnection primitive failed."); + return nullptr; + } + + attr->has_bias = fullConnAttr.hasBias; + attr->use_axis = fullConnAttr.useAxis; + attr->axis = fullConnAttr.axis; + attr->activation_type = static_cast(fullConnAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetMaxPoolFusionPrimitive(const std::vector& primitive) +{ + MaxPoolFusion maxPoolAttr; + auto ret = ParsePrimitive(primitive, maxPoolAttr, MaxPoolFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of MaxPoolFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_MaxPoolFusion; + + auto attr = new (std::nothrow) mindspore::schema::MaxPoolFusionT; + if (attr == nullptr) { + HDF_LOGE("Create MaxPoolFusion primitive failed."); + return nullptr; + } + + attr->kernel_size = maxPoolAttr.kernelSize; + attr->strides = maxPoolAttr.strides; + attr->pad = maxPoolAttr.pad; + attr->pad_mode = static_cast(maxPoolAttr.padMode); + attr->format = static_cast(maxPoolAttr.format); + attr->global = maxPoolAttr.global; + attr->activation_type = static_cast(maxPoolAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetMatMulFusionPrimitive(const std::vector& primitive) +{ + MatMulFusion matmulAttr; + auto ret = ParsePrimitive(primitive, matmulAttr, MatMulFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of MatMulFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_MatMulFusion; + + auto attr = new (std::nothrow) mindspore::schema::MatMulFusionT; + if (attr == nullptr) { + HDF_LOGE("Create MatMulFusion primitive failed."); + return nullptr; + } + + attr->transpose_a = matmulAttr.transposeA; + attr->transpose_b = matmulAttr.transposeB; + attr->activation_type = static_cast(matmulAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetSoftmaxPrimitive(const std::vector& primitive) +{ + Softmax softmaxAttr; + auto ret = ParsePrimitive(primitive, softmaxAttr, SoftmaxBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Softmax operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Softmax; + + auto attr = new (std::nothrow) mindspore::schema::SoftmaxT; + if (attr == nullptr) { + HDF_LOGE("Create Softmax primitive 
failed."); + return nullptr; + } + + attr->axis = softmaxAttr.axis; + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetReshapePrimitive(const std::vector& primitive) +{ + Reshape reshapeAttr; + auto ret = ParsePrimitive(primitive, reshapeAttr, ReshapeBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Reshape operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Reshape; + + auto attr = new (std::nothrow) mindspore::schema::ReshapeT; + if (attr == nullptr) { + HDF_LOGE("Create Reshape primitive failed."); + return nullptr; + } + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetScaleFusionPrimitive(const std::vector& primitive) +{ + ScaleFusion scaleAttr; + auto ret = ParsePrimitive(primitive, scaleAttr, ScaleFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of ScaleFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_ScaleFusion; + + auto attr = new (std::nothrow) mindspore::schema::ScaleFusionT; + if (attr == nullptr) { + HDF_LOGE("Create ScaleFusion primitive failed."); + return nullptr; + } + + attr->axis = scaleAttr.axis; + attr->activation_type = static_cast(scaleAttr.activationType); + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetActivationPrimitive(const std::vector& primitive) +{ + Activation actAttr; + auto ret = ParsePrimitive(primitive, actAttr, ActivationBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Activation operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Activation; + + auto attr = new (std::nothrow) mindspore::schema::ActivationT; + if (attr == nullptr) { + HDF_LOGE("Create Activation primitive failed."); + return nullptr; + } + + attr->alpha = actAttr.alpha; + attr->min_val = actAttr.minVal; + attr->max_val = actAttr.maxVal; + attr->approximate = actAttr.approximate; + attr->activation_type = static_cast(actAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetQuantDTypeCastPrimitive(const std::vector& primitive) +{ + QuantDTypeCast quantAttr; + auto ret = ParsePrimitive(primitive, quantAttr, QuantDTypeCastBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of QuantDTypeCast operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_QuantDTypeCast; + + auto attr = new (std::nothrow) mindspore::schema::QuantDTypeCastT; + if (attr == nullptr) { + HDF_LOGE("Create QuantDTypeCast primitive failed."); + return nullptr; + } + + attr->src_t = quantAttr.srcT; + attr->dst_t = quantAttr.dstT; + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetMulFusionPrimitive(const std::vector& primitive) +{ + MulFusion mulAttr; + auto ret = ParsePrimitive(primitive, mulAttr, MulFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of MulFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_MulFusion; + + auto attr = new (std::nothrow) mindspore::schema::MulFusionT; + if (attr == nullptr) { + HDF_LOGE("Create MulFusion primitive failed."); + return nullptr; + } + + attr->activation_type = static_cast(mulAttr.activationType); + 
prim->value.value = attr; + return prim; +} + +REGISTER_NODE(Activation, NodeType::NODE_TYPE_ACTIVATION, GetActivationPrimitive); +REGISTER_NODE(AddFusion, NodeType::NODE_TYPE_ADD_FUSION, GetAddPrimitive); +REGISTER_NODE(AvgPoolFusion, NodeType::NODE_TYPE_AVGPOOL_FUSION, GetAvgPoolPrimitive); +REGISTER_NODE(Concat, NodeType::NODE_TYPE_CONCAT, GetConcatPrimitive); +REGISTER_NODE(Conv2DFusion, NodeType::NODE_TYPE_CONV2D_FUSION, GetConv2dPrimitive); +REGISTER_NODE(FullConnection, NodeType::NODE_TYPE_FULL_CONNECTION, GetFullConnectionPrimitive); +REGISTER_NODE(MaxPoolFusion, NodeType::NODE_TYPE_MAX_POOL_FUSION, GetMaxPoolFusionPrimitive); +REGISTER_NODE(MatMulFusion, NodeType::NODE_TYPE_MATMUL_FUSION, GetMatMulFusionPrimitive); +REGISTER_NODE(Reshape, NodeType::NODE_TYPE_RESHAPE, GetReshapePrimitive); +REGISTER_NODE(Softmax, NodeType::NODE_TYPE_SOFTMAX, GetSoftmaxPrimitive); +REGISTER_NODE(ScaleFusion, NodeType::NODE_TYPE_SCALE_FUSION, GetScaleFusionPrimitive); +REGISTER_NODE(QuantDTypeCast, NodeType::NODE_TYPE_QUANT_DTYPE_CAST, GetQuantDTypeCastPrimitive); +REGISTER_NODE(MulFusion, NodeType::NODE_TYPE_MUL_FUSION, GetMulFusionPrimitive); +} // namespace V1_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/src/node_registry.cpp b/example/drivers/nnrt/hdi_cpu_service/src/node_registry.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7b6a6aa1b0195202630eb3f8d385635695605f91 --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/src/node_registry.cpp @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "node_registry.h" + +#include "utils/hdf_log.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +NodeRegistry& NodeRegistry::GetSingleton() +{ + static NodeRegistry registry; + return registry; +} + +NodeRegistry::Registrar::Registrar(NodeType type, std::function&)> nodeFunc) +{ + auto& registry = NodeRegistry::GetSingleton(); + if (registry.m_nodeRegs.find(type) != registry.m_nodeRegs.end()) { + HDF_LOGW("Node has been registered. nodeType=%d", type); + } else { + registry.m_nodeRegs[type] = nodeFunc; + } +} + +std::function&)> NodeRegistry::GetNodeFunc(NodeType type) const +{ + if (m_nodeRegs.find(type) == m_nodeRegs.end()) { + HDF_LOGW("Node type is not found. 
nodeType=%d", type); + return nullptr; + } + + return m_nodeRegs.at(type); +} + +bool NodeRegistry::IsNodeTypeExist(NodeType type) const +{ + if (m_nodeRegs.find(type) == m_nodeRegs.end()) { + return false; + } + return true; +} +} // namespace V1_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/src/prepared_model_service.cpp b/example/drivers/nnrt/hdi_cpu_service/src/prepared_model_service.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b5452f81169db620b3af0a5bb28dd0a389a779aa --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/src/prepared_model_service.cpp @@ -0,0 +1,412 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "prepared_model_service.h" + +#include +#include "securec.h" +#include "utils/hdf_log.h" + +#include "shared_buffer_parser.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +PreparedModelService::PreparedModelService(std::shared_ptr context) + : m_context(context) {} + +PreparedModelService::~PreparedModelService() +{ + if (m_cacheBuffer != nullptr) { + m_cacheBuffer->CloseAshmem(); + } + + for (auto& inputAsh : m_inputAshmems) { + inputAsh->UnmapAshmem(); + inputAsh->CloseAshmem(); + } + + for (auto& outputAsh : m_outputAshmems) { + outputAsh->UnmapAshmem(); + outputAsh->CloseAshmem(); + } +} + +int32_t PreparedModelService::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + HDF_LOGE("The parameters of ExportModelCache should be an empty vector."); + return HDF_ERR_INVALID_PARAM; + } + + if (m_cacheBuffer != nullptr) { + auto fd = m_cacheBuffer->GetAshmemFd(); + auto size = m_cacheBuffer->GetAshmemSize(); + + // SharedBuffer: fd, bufferSize, offset, dataSize + modelCache.emplace_back(SharedBuffer{fd, size, 0, size}); + return HDF_SUCCESS; + } + + auto size = m_builder.GetSize(); + auto buffer = m_builder.GetBufferPointer(); + const char* name = m_graph != nullptr ? 
m_graph->name.c_str() : "CacheModel"; + sptr cache = Ashmem::CreateAshmem(name, size); + if (cache == nullptr) { + HDF_LOGE("Create shared memory failed."); + return HDF_ERR_MALLOC_FAIL; + } + + bool ret = cache->MapReadAndWriteAshmem(); + if (!ret) { + HDF_LOGE("Map fd to write cache failed."); + return HDF_FAILURE; + } + + ret = cache->WriteToAshmem(buffer, size, 0); + cache->UnmapAshmem(); + if (!ret) { + HDF_LOGE("Write cache failed."); + return HDF_FAILURE; + } + + m_cacheBuffer = cache; + + // SharedBuffer: fd, bufferSize, offset, dataSize + modelCache.emplace_back(SharedBuffer {cache->GetAshmemFd(), cache->GetAshmemSize(), 0, cache->GetAshmemSize()}); + return HDF_SUCCESS; +} + +int32_t PreparedModelService::Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) +{ + auto ret = SetInputs(inputs); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Inputs tensor is invalid."); + return ret; + } + + if (!m_isDynamicShape) { + ret = SetOutputs(outputs); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Output tensor is invalid."); + ResetInputAndOutput(); + return ret; + } + } + + auto msRet = m_model->Predict(m_inputs, &m_outputs); + if (msRet != mindspore::kSuccess) { + HDF_LOGE("Run model failed."); + ResetInputAndOutput(); + return HDF_FAILURE; + } + + ret = UpdateOutput(outputs, outputsDims, isOutputBufferEnough); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Update output dimension or data failed."); + ResetInputAndOutput(); + return ret; + } + + ResetInputAndOutput(); + + return HDF_SUCCESS; +} + +int32_t PreparedModelService::UpdateOutput(const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) +{ + bool isEnough {true}; + size_t outputSize = m_outputs.size(); + isOutputBufferEnough.resize(outputSize, true); + for (size_t i = 0; i < outputSize; i++) { + auto& msOutput = m_outputs[i]; + auto& output = outputs[i]; + + auto msShape = msOutput.Shape(); + outputsDims.emplace_back(msShape.begin(), msShape.end()); + + auto dataSize = msOutput.DataSize(); + if (dataSize > output.data.bufferSize) { + HDF_LOGE("Output buffer is not enough. 
actual size %{public}zu, buffer size %{public}u", + dataSize, output.data.bufferSize); + isOutputBufferEnough[i] = false; + isEnough= false; + } + + if (isEnough && m_isDynamicShape) { + auto msData = msOutput.MutableData(); + SharedBufferParser parser; + auto ret = parser.Init(output.data); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse %zu th output data failed.", i); + return HDF_ERR_INVALID_PARAM; + } + + auto data = parser.GetBufferPtr(); + auto memRet = memcpy_s(data, dataSize, msData, dataSize); + if (memRet != EOK) { + HDF_LOGE("Copy output memory failed."); + return HDF_FAILURE; + } + } + } + + return HDF_SUCCESS; +} + +void PreparedModelService::ResetInputAndOutput() +{ + for (auto& msInput : m_inputs) { + msInput.SetData(nullptr); + } + + if (!m_isDynamicShape) { + for (auto& msOutput : m_outputs) { + msOutput.SetData(nullptr); + } + } +} + +int32_t PreparedModelService::Compile(std::shared_ptr graph) +{ + if (graph == nullptr) { + HDF_LOGE("Graph cannot be nullptr"); + return HDF_ERR_INVALID_PARAM; + } + for (auto i : graph->inputIndex) { + auto inputShape = graph->allTensors[i]->dims; + auto iter = std::find(inputShape.begin(), inputShape.end(), DYNAMIC_SHAPE_FLAG); + if (iter != inputShape.end()) { + m_isDynamicShape = true; + break; + } + } + auto offset = mindspore::schema::MetaGraph::Pack(m_builder, graph.get()); + m_builder.Finish(offset); + mindspore::schema::FinishMetaGraphBuffer(m_builder, offset); + auto modelSize = m_builder.GetSize(); + uint8_t* modelBuffer = m_builder.GetBufferPointer(); + if (modelBuffer == nullptr) { + HDF_LOGE("Model is invalid."); + return HDF_FAILURE; + } + + m_model = std::make_shared(); + mindspore::Status msRet = m_model->Build(modelBuffer, modelSize, mindspore::kMindIR, m_context); + if (msRet != mindspore::kSuccess) { + HDF_LOGE("Prepare model failed, please make sure model is validate."); + return HDF_FAILURE; + } + + auto ret = GetMSInputsAndOutputs(); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Model without inputs or outputs is invalid."); + return ret; + } + return HDF_SUCCESS; +} + +int32_t PreparedModelService::Compile(const void* modelBuffer, size_t length) +{ + if (modelBuffer == nullptr || length == 0) { + HDF_LOGE("ModelBuffer cannot be nullptr and length cannot be zero."); + return HDF_ERR_INVALID_PARAM; + } + + m_model = std::make_shared(); + mindspore::Status msRet = m_model->Build(modelBuffer, length, mindspore::kMindIR, m_context); + if (msRet != mindspore::kSuccess) { + HDF_LOGE("Prepare model from cache failed, please make sure model cache is valid."); + return HDF_FAILURE; + } + + auto ret = GetMSInputsAndOutputs(); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Model without inputs or outputs is invalid."); + return ret; + } + + for (auto input : m_inputs) { + auto shapes = input.Shape(); + if (std::find(shapes.begin(), shapes.end(), DYNAMIC_SHAPE_FLAG) != shapes.end()) { + m_isDynamicShape = true; + break; + } + } + return HDF_SUCCESS; +} + +int32_t PreparedModelService::SetInputs(const std::vector& inputs) +{ + if (inputs.size() != m_inputs.size()) { + HDF_LOGE("inputs size is invalid. 
expect: %zu, actual: %zu", m_inputs.size(), inputs.size()); + return HDF_ERR_INVALID_PARAM; + } + for (auto& ash : m_inputAshmems) { + ash->UnmapAshmem(); + ash->CloseAshmem(); + } + m_inputAshmems.clear(); + + int32_t ret {0}; + size_t inputSize = m_inputs.size(); + std::vector> tmpAllDims; + for (size_t i = 0; i < inputSize; i++) { + auto& input = inputs[i]; + auto& msInput = m_inputs[i]; + ret = CompareTensor(input, msInput); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Inputs tensor is not match that of model. Please check input tensor."); + return ret; + } + tmpAllDims.emplace_back(input.dimensions.begin(), input.dimensions.end()); + } + + if (m_isDynamicShape) { + auto msRet = m_model->Resize(m_inputs, tmpAllDims); + if (msRet != mindspore::kSuccess) { + HDF_LOGE("Resize for dynamic inputs failed."); + return HDF_FAILURE; + } + ret = GetMSInputsAndOutputs(); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Get ms inputs or outputs failed after resize."); + return ret; + } + } + + for (size_t i = 0; i < inputSize; i++) { + auto& input = inputs[i]; + auto& msInput = m_inputs[i]; + sptr ashptr = ParseBuffer(input.data); + if (ashptr == nullptr) { + HDF_LOGE("Parse %zuth input data failed.", i); + return HDF_ERR_INVALID_PARAM; + } + + auto data = const_cast(ashptr->ReadFromAshmem(input.data.dataSize, 0)); + msInput.SetData(data); + m_inputAshmems.emplace_back(ashptr); + } + return HDF_SUCCESS; +} + +int32_t PreparedModelService::SetOutputs(const std::vector& outputs) +{ + HDF_LOGI("Start Set outputs, m_outputs size=%zu", m_outputs.size()); + if (outputs.size() != m_outputs.size()) { + HDF_LOGE("outputs size is invalid. expect: %{public}zu, actual: %{public}zu", m_outputs.size(), outputs.size()); + return HDF_ERR_INVALID_PARAM; + } + for (auto ash : m_outputAshmems) { + ash->UnmapAshmem(); + ash->CloseAshmem(); + } + m_outputAshmems.clear(); + + for (size_t i = 0; i < m_outputs.size(); i++) { + auto& output = outputs[i]; + auto& msOutput = m_outputs[i]; + + sptr ashptr = ParseBuffer(output.data); + if (ashptr == nullptr) { + HDF_LOGE("Parse %{public}zu th output data failed.", i); + return HDF_ERR_INVALID_PARAM; + } + + auto data = const_cast(ashptr->ReadFromAshmem(output.data.dataSize, 0)); + msOutput.SetAllocator(nullptr); + msOutput.SetData(data); + m_outputAshmems.emplace_back(ashptr); + } + return HDF_SUCCESS; +} + +int32_t PreparedModelService::GetMSInputsAndOutputs() +{ + m_inputs = m_model->GetInputs(); + if (m_inputs.empty()) { + HDF_LOGE("Get inputs failed."); + return HDF_FAILURE; + } + + m_outputs = m_model->GetOutputs(); + if (m_outputs.empty()) { + HDF_LOGE("Get outputs failed."); + return HDF_FAILURE; + } + return HDF_SUCCESS; +} + +int32_t PreparedModelService::CompareTensor(const IOTensor& tensor, const mindspore::MSTensor& msTensor) +{ + auto dataType = static_cast(msTensor.DataType()); + if (tensor.dataType != dataType) { + HDF_LOGE("Data type of tensor is not match that of model."); + return HDF_ERR_INVALID_PARAM; + } + + auto format = static_cast(msTensor.format()); + if (tensor.format != format) { + HDF_LOGE("Format of tensor is not match that of model."); + return HDF_ERR_INVALID_PARAM; + } + + for (size_t i = 0; i < tensor.dimensions.size(); i++) { + if (msTensor.Shape()[i] != DYNAMIC_SHAPE_FLAG && tensor.dimensions[i] != msTensor.Shape()[i]) { + HDF_LOGE("The Shape of tensor is not match that of model."); + return HDF_ERR_INVALID_PARAM; + } + } + + return HDF_SUCCESS; +} + +sptr PreparedModelService::ParseBuffer(const SharedBuffer& buffer) +{ + if (buffer.fd == -1) { + 
HDF_LOGE("Invalid buffer fd, it cannot be -1."); + return nullptr; + } + + HDF_LOGW("NNRT buffer fd=%{public}d, length=%{public}u", buffer.fd, buffer.dataSize); + + sptr ashptr = new (std::nothrow) Ashmem(buffer.fd, buffer.bufferSize); + if (ashptr == nullptr) { + HDF_LOGE("Create shared memory failed."); + return nullptr; + } + + if (!ashptr->MapReadAndWriteAshmem()) { + HDF_LOGE("Map buffer fd to address failed."); + return nullptr; + } + + const void* data = ashptr->ReadFromAshmem(buffer.dataSize, buffer.offset); + if (data == nullptr) { + HDF_LOGE("Get data address failed."); + ashptr->UnmapAshmem(); + ashptr->CloseAshmem(); + return nullptr; + } + return ashptr; +} +} // V1_0 +} // Nnrt +} // HDI +} // OHOS diff --git a/example/drivers/nnrt/hdi_cpu_service/src/shared_buffer_parser.cpp b/example/drivers/nnrt/hdi_cpu_service/src/shared_buffer_parser.cpp new file mode 100644 index 0000000000000000000000000000000000000000..19f5aa5cf108d2ee0609213d43833ef6ab9ff64d --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/src/shared_buffer_parser.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef OHOS_HDI_NNR_V1_0_UTILS_H +#define OHOS_HDI_NNR_V1_0_UTILS_H + +#include "shared_buffer_parser.h" + +#include +#include "ashmem.h" +#include "v1_0/nnrt_types.h" +#include "utils/hdf_log.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +SharedBufferParser::~SharedBufferParser() +{ + if (m_ashptr != nullptr) { + m_ashptr->UnmapAshmem(); + m_ashptr->CloseAshmem(); + m_bufferAddr = nullptr; + } +} + +int32_t SharedBufferParser::Init(const std::string& name, int32_t size) +{ + HDF_LOGI("Init SharedBufferParser from name and size."); + sptr ashptr = Ashmem::CreateAshmem(name.c_str(), size); + if (ashptr == nullptr) { + HDF_LOGE("Create ashmen from size failed."); + return HDF_FAILURE; + } + + SharedBuffer buffer; + buffer.fd = ashptr->GetAshmemFd(); + buffer.bufferSize = ashptr->GetAshmemSize(); + buffer.offset = 0; + buffer.dataSize = size; + + auto ret = Init(buffer); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Init SharedBufferParser failed."); + return ret; + } + return HDF_SUCCESS; +} + +int32_t SharedBufferParser::Init(const SharedBuffer& buffer) +{ + if (buffer.fd == INVALID_FD) { + HDF_LOGE("Invalid buffer fd, it cannot be %{public}d.", INVALID_FD); + return HDF_ERR_INVALID_PARAM; + } + + m_ashptr = new (std::nothrow) Ashmem(buffer.fd, buffer.bufferSize); + if (m_ashptr == nullptr) { + HDF_LOGE("Create ashmem failed."); + return HDF_FAILURE; + } + + if (!m_ashptr->MapReadAndWriteAshmem()) { + HDF_LOGE("Map buffer fd to address failed."); + return HDF_FAILURE; + } + + auto bufferAddr = m_ashptr->ReadFromAshmem(buffer.dataSize, buffer.offset); + if (bufferAddr == nullptr) { + HDF_LOGE("Invalid dataSize or offset of SharedBuffer."); + return HDF_ERR_INVALID_PARAM; + } + m_bufferAddr = const_cast(bufferAddr); + + m_buffer = buffer; + return HDF_SUCCESS; +} + +void* 
SharedBufferParser::GetBufferPtr() +{ + return m_bufferAddr; +} + +SharedBuffer SharedBufferParser::GetBuffer() +{ + return m_buffer; +} +} // V1_0 +} // Nnrt +} // HDI +} // OHOS +#endif // OHOS_HDI_NNR_V1_0_UTILS_H \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/src/validation.cpp b/example/drivers/nnrt/hdi_cpu_service/src/validation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e640c0f9a16d357e2a468272eaae78c41eed2e34 --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/src/validation.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "validation.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +int32_t ValidatePerformanceMode(PerformanceMode mode) +{ + if (mode < PerformanceMode::PERFORMANCE_NONE || mode > PerformanceMode::PERFORMANCE_EXTREME) { + return false; + } + + return true; +} + +int32_t ValidatePriority(Priority priority) +{ + if (priority < Priority::PRIORITY_NONE || priority > Priority::PRIORITY_HIGH) { + return false; + } + + return true; +} + +int32_t ValidateDataType(DataType dataType) +{ + if (dataType < DataType::DATA_TYPE_UNKNOWN || dataType > DataType::DATA_TYPE_FLOAT64) { + return false; + } + + if (dataType > DataType::DATA_TYPE_UNKNOWN && dataType < DataType::DATA_TYPE_BOOL) { + return false; + } + + if (dataType > DataType::DATA_TYPE_BOOL && dataType < DataType::DATA_TYPE_INT8) { + return false; + } + + if (dataType > DataType::DATA_TYPE_UINT64 && dataType < DataType::DATA_TYPE_FLOAT16) { + return false; + } + + return true; +} + +int32_t ValidateFormat(Format format) +{ + if (format < Format::FORMAT_NONE || format > Format::FORMAT_NHWC) { + return false; + } + + return true; +} +} // namespace V1_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/BUILD.gn b/frameworks/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..34b7da30133e38ddbd04db0138f125c208279ca5 --- /dev/null +++ b/frameworks/BUILD.gn @@ -0,0 +1,133 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
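+# Builds libneural_network_runtime from the runtime core sources and the operator builders listed below.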
+ +import("//build/ohos.gni") + +config("nnrt_config") { + cflags_cc = [ "-fexceptions" ] +} + +nnrt_sources = [ + "native/device_manager.cpp", + "native/device_registrar.cpp", + "native/hdi_device.cpp", + "native/hdi_prepared_model.cpp", + "native/memory_manager.cpp", + "native/transform.cpp", + "native/nn_tensor.cpp", + "native/validation.cpp", + "native/inner_model.cpp", + "native/compilation.cpp", + "native/execution_plan.cpp", + "native/executor.cpp", + "native/neural_network_runtime.cpp", + "native/ops_builder.cpp", + "native/ops_registry.cpp", +] + +ops_sources = [ + "native/ops/add_builder.cpp", + "native/ops/argmax_builder.cpp", + "native/ops/avgpool_builder.cpp", + "native/ops/pooling_builder.cpp", + "native/ops/batch_to_space_nd_builder.cpp", + "native/ops/bias_add_builder.cpp", + "native/ops/cast_builder.cpp", + "native/ops/concat_builder.cpp", + "native/ops/conv2d_builder.cpp", + "native/ops/conv2d_transpose_builder.cpp", + "native/ops/depthwise_conv2d_native_builder.cpp", + "native/ops/div_builder.cpp", + "native/ops/eltwise_builder.cpp", + "native/ops/expandims_builder.cpp", + "native/ops/fullconnection_builder.cpp", + "native/ops/maxpool_builder.cpp", + "native/ops/slice_builder.cpp", + "native/ops/softmax_builder.cpp", + "native/ops/space_to_batch_nd_builder.cpp", + "native/ops/split_builder.cpp", + "native/ops/sqrt_builder.cpp", + "native/ops/squared_difference_builder.cpp", + "native/ops/squeeze_builder.cpp", + "native/ops/stack_builder.cpp", + "native/ops/strided_slice_builder.cpp", + "native/ops/sub_builder.cpp", + "native/ops/tanh_builder.cpp", + "native/ops/tile_builder.cpp", + "native/ops/top_k_builder.cpp", + "native/ops/transpose_builder.cpp", + "native/ops/unsqueeze_builder.cpp", + "native/ops/batchnorm_builder.cpp", + "native/ops/fill_builder.cpp", + "native/ops/matmul_builder.cpp", + "native/ops/gather_builder.cpp", + "native/ops/gelu_builder.cpp", + "native/ops/hswish_builder.cpp", + "native/ops/layernorm_builder.cpp", + "native/ops/maximum_builder.cpp", + "native/ops/lessequal_builder.cpp", + "native/ops/mul_builder.cpp", + "native/ops/onehot_builder.cpp", + "native/ops/pad_builder.cpp", + "native/ops/pow_builder.cpp", + "native/ops/prelu_builder.cpp", + "native/ops/quant_dtype_cast_builder.cpp", + "native/ops/reduceall_builder.cpp", + "native/ops/reducemean_builder.cpp", + "native/ops/reduceprod_builder.cpp", + "native/ops/relu_builder.cpp", + "native/ops/relu6_builder.cpp", + "native/ops/reshape_builder.cpp", + "native/ops/resize_bilinear_builder.cpp", + "native/ops/rsqrt_builder.cpp", + "native/ops/scale_builder.cpp", + "native/ops/shape_builder.cpp", + "native/ops/sigmoid_builder.cpp", +] + +ohos_shared_library("libneural_network_runtime") { + sources = nnrt_sources + sources += ops_sources + include_dirs = [ + "//commonlibrary/c_utils/base/include", + "//drivers/hdf_core/adapter/uhdf/posix/include", + "//drivers/hdf_core/adapter/uhdf2/include/hdi", + "//drivers/hdf_core/adapter/uhdf2/ipc/include", + "//drivers/hdf_core/framework/include/core", + "//drivers/hdf_core/framework/include/utils", + "//drivers/hdf_core/framework/core/common/include/host", + "//foundation/ai/neural_network_runtime", + "//foundation/communication/ipc/interfaces/innerkits/ipc_core/include", + "//third_party/googletest/googletest/include/gtest", + "//third_party/mindspore/mindspore/lite/mindir/include" + ] + + install_images = [ + "system", + "updater" + ] + + public_configs = [ ":nnrt_config" ] + + external_deps = [ + "hilog_native:libhilog", + "hitrace_native:libhitracechain", + 
"c_utils:utils", + "hdf_core:libhdf_utils", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] + + subsystem_name = "ai" + part_name = "neural_network_runtime" +} diff --git a/frameworks/native/compilation.cpp b/frameworks/native/compilation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ed6e737ba4124bd8387ae3a2be9e6af35dee69b8 --- /dev/null +++ b/frameworks/native/compilation.cpp @@ -0,0 +1,714 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "compilation.h" + +#include +#include +#include +#include +#include +#include + +#include "common/utils.h" +#include "common/scoped_trace.h" +#include "validation.h" +#include "device_manager.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +constexpr int MAX_MODEL_SIZE = 200 * 1024 * 1024; // 200MB +constexpr int OCT_UNIT = 8; +constexpr int NULL_PTR_LENGTH = 0; +constexpr int NUMBER_CACHE_INFO_MEMBERS = 3; + +// CRC16 Table is created based on the Polynomial of G(x) = x^16 + x^12 + x^15 + 1 and +// CRC register initialization value of "0" (0x0000) +static const unsigned short CRC16_TAB[256] = { + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 
0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0 +}; + +Compilation::Compilation(const InnerModel* innerModel) + : m_liteGraph(innerModel->GetLiteGraphs()), + m_inputTensors(innerModel->GetInputTensors()), + m_outputTensors(innerModel->GetOutputTensors()) {} + +OH_NN_ReturnCode Compilation::SetDevice(size_t deviceId) +{ + if (m_isBuild) { + LOGE("Cannot set deviceId after compilation finish."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto& deviceManager = DeviceManager::GetInstance(); + std::shared_ptr availableDevice = deviceManager.GetDevice(deviceId); + if (availableDevice == nullptr) { + LOGE("[Compilation] DeviceId does not exist, deviceId=%zu", deviceId); + return OH_NN_INVALID_PARAMETER; + } + + std::vector supportedList; + OH_NN_ReturnCode ret = availableDevice->GetSupportedOperation(m_liteGraph, supportedList); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] SetDevice failed, error happened when getting supported operation."); + return ret; + } + + for (bool isSupport : supportedList) { + if (!isSupport) { + LOGE("[Compilation] SetDevice failed, current device not support the model, device id: %zu.", deviceId); + return OH_NN_FAILED; + } + } + + bool supportDynamic; + ret = availableDevice->IsDynamicInputSupported(supportDynamic); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] SetDevice failed, error happened when checking whether device supports dynamic input."); + return ret; + } + + if (IsDynamicShape() && (!supportDynamic)) { + LOGE("[Compilation] SetDevice failed." + "The device does not support dynamic shape inputs, but the model has dynamic inputs."); + return OH_NN_FAILED; + } + + m_device = availableDevice; + m_deviceId = deviceId; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::SetCacheDir(const std::string& cacheModelPath, uint32_t version) +{ + if (m_isBuild) { + LOGE("Cannot set cache after compilation finish."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (m_device == nullptr) { + LOGE("The parameter of m_device is nullptr, please call SetDevice function before calling SetCacheDir."); + return OH_NN_OPERATION_FORBIDDEN; + } + + bool isSupportedCache {false}; + OH_NN_ReturnCode ret = m_device->IsModelCacheSupported(isSupportedCache); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Fail to query whether the device is available to save cache model."); + return ret; + } + + if (!isSupportedCache) { + LOGE("[Compilation] The device is unavailable to save cache model."); + return OH_NN_OPERATION_FORBIDDEN; + } + + char realPathRes[PATH_MAX]; + const char* filePath = realpath(cacheModelPath.c_str(), realPathRes); + if (filePath == nullptr) { + LOGE("[Compilation] The cache model path is invalid."); + return OH_NN_INVALID_PARAMETER; + } + + struct stat fileInfo; + if (stat(filePath, &fileInfo) != 0) { + LOGE("[Compilation] The cache directory does not exist or cannot be accessed."); + return OH_NN_INVALID_PARAMETER; + } + + if (!(fileInfo.st_mode & S_IFDIR)) { + LOGE("[Compilation] The cache model path is not a directory."); + return OH_NN_INVALID_PARAMETER; + } + + m_cachePath = (std::string)filePath + "/"; + m_version = version; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::SetPerformance(OH_NN_PerformanceMode performance) +{ + if (m_isBuild) { + LOGE("[Compilation] Cannot set performance after compilation finish."); + return 
OH_NN_OPERATION_FORBIDDEN; + } + + if (m_device == nullptr) { + LOGE("Cannot set performance before set device, please set device first"); + return OH_NN_OPERATION_FORBIDDEN; + } + + bool isSupportedPerformance {false}; + OH_NN_ReturnCode ret = m_device->IsPerformanceModeSupported(isSupportedPerformance); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Call device %zu failed.", m_deviceId); + return ret; + } + + if (!isSupportedPerformance) { + LOGE("[Compilation] This device %zu is not support performance setting.", m_deviceId); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (!Validation::ValidatePerformanceMode(performance)) { + LOGE("[Compilation] SetPerformance passed invalid performance=%d", performance); + return OH_NN_INVALID_PARAMETER; + } + + m_performance = performance; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::SetPriority(OH_NN_Priority priority) +{ + if (m_isBuild) { + LOGE("[Compilation] Cannot set priority after compilation finish."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (m_device == nullptr) { + LOGE("Cannot set priority before set device, please set device first"); + return OH_NN_OPERATION_FORBIDDEN; + } + + bool isSupportedPriority {false}; + OH_NN_ReturnCode ret = m_device->IsPrioritySupported(isSupportedPriority); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Call device %zu failed.", m_deviceId); + return ret; + } + + if (!isSupportedPriority) { + LOGE("[Compilation] This device %zu is not support priority setting.", m_deviceId); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (!Validation::ValidatePriority(priority)) { + LOGE("[Compilation] SetPriority passed invalid priority=%d", priority); + return OH_NN_INVALID_PARAMETER; + } + + m_priority = priority; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::SetEnableFp16(bool isFp16) +{ + if (m_isBuild) { + LOGE("[Compilation] Cannot enable float16 after compilation finish."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (m_device == nullptr) { + LOGE("Cannot set enable fp16 before set device, please set device first"); + return OH_NN_OPERATION_FORBIDDEN; + } + + bool isSupportedFp16 {false}; + OH_NN_ReturnCode ret = m_device->IsFloat16PrecisionSupported(isSupportedFp16); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Call device %zu failed.", m_deviceId); + return ret; + } + + if (!isSupportedFp16) { + LOGE("[Compilation] This device %zu is not support float16 precision setting.", m_deviceId); + return OH_NN_OPERATION_FORBIDDEN; + } + + m_enableFp16 = isFp16; + return OH_NN_SUCCESS; +} + +unsigned short Compilation::GetCrc16(const unsigned char* buffer, size_t length) const +{ + unsigned short crc16 = 0; + for (size_t i = 0; i < length; ++i) { + uint8_t tableIndex = ((crc16 >> OCT_UNIT) ^ *buffer++) & 0x00ff; + crc16 = (crc16 << OCT_UNIT) ^ CRC16_TAB[tableIndex]; + } + return crc16; +} + +OH_NN_ReturnCode Compilation::GenerateCacheInfo(uint32_t cacheSize, std::unique_ptr& cacheInfo) const +{ + std::string cacheInfoPath = m_cachePath + "cache_info.nncache"; + std::ofstream cacheInfoStream(cacheInfoPath, std::ios::binary | std::ios::out | std::ios::trunc); + if (cacheInfoStream.fail()) { + LOGE("[Compilation] Model cache info file is invalid."); + return OH_NN_INVALID_FILE; + } + + if (!cacheInfoStream.write(reinterpret_cast(cacheInfo.get()), cacheSize)) { + LOGE("[Compilation] Fail to write cache info."); + cacheInfoStream.close(); + return OH_NN_FAILED; + } + + cacheInfoStream.close(); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode 
Compilation::GenerateCacheModel(size_t cacheNumber, std::unique_ptr& cacheInfo, + std::vector modelBuffer) const +{ + auto cacheInfoPtr = cacheInfo.get(); + *cacheInfoPtr++ = static_cast(cacheNumber); + *cacheInfoPtr++ = static_cast(m_version); + *cacheInfoPtr++ = static_cast(m_deviceId); + for (uint32_t i = 0; i < cacheNumber; ++i) { + std::string cacheModelFile = m_cachePath + std::to_string(i) + ".nncache"; + std::ofstream cacheModelStream(cacheModelFile, std::ios::binary | std::ios::out | std::ios::trunc); + if (cacheModelStream.fail()) { + LOGE("[Compilation] Model cache file is invalid."); + return OH_NN_INVALID_FILE; + } + + uint64_t checkSum = static_cast(GetCrc16(static_cast(modelBuffer[i].buffer), + modelBuffer[i].length)); + *cacheInfoPtr++ = checkSum; + if (!cacheModelStream.write(static_cast(modelBuffer[i].buffer), modelBuffer[i].length)) { + LOGE("[Compilation] Fail to write cache model."); + cacheModelStream.close(); + return OH_NN_FAILED; + }; + + cacheModelStream.close(); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::GenerateCacheFiles(const std::vector& modelBuffer) const +{ + const size_t cacheNumber = modelBuffer.size(); + uint32_t cacheSize = NUMBER_CACHE_INFO_MEMBERS + cacheNumber; + std::unique_ptr cacheInfo = std::make_unique(cacheSize); + if (cacheInfo == nullptr) { + LOGE("Fail to create cacheInfo instance."); + return OH_NN_MEMORY_ERROR; + } + + OH_NN_ReturnCode ret = GenerateCacheModel(cacheNumber, cacheInfo, modelBuffer); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + uint32_t infoCharNumber = cacheSize * sizeof(uint64_t); + ret = GenerateCacheInfo(infoCharNumber, cacheInfo); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::GetCacheFileLength(std::ifstream& ifs, int& fsize) const +{ + ifs.seekg(0, std::ios::end); + if (!ifs.good()) { + LOGE("[Compilation] Fail to set the position of the next character to be extracted from the input stream."); + return OH_NN_INVALID_FILE; + } + + int handleValue = ifs.tellg(); + if (handleValue == -1) { + LOGE("[Compilation] Unable to get position of the input stream."); + return OH_NN_INVALID_FILE; + } + + if ((handleValue > MAX_MODEL_SIZE) || (handleValue == NULL_PTR_LENGTH)) { + LOGE("[Compilation] Unable to read huge or empty input stream, get cache file size=%d", handleValue); + return OH_NN_INVALID_FILE; + } + + fsize = handleValue; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::ReadCacheModelFile(const std::string& file, ModelBuffer& modelBuffer) const +{ + std::ifstream ifs(file.c_str(), std::ios::in | std::ios::binary); + if (!ifs) { + LOGE("[Compilation] Fail to open cache file."); + return OH_NN_INVALID_FILE; + } + + int fsize {-1}; + OH_NN_ReturnCode ret = GetCacheFileLength(ifs, fsize); + if (ret != OH_NN_SUCCESS) { + ifs.close(); + return ret; + } + + ifs.seekg(0, std::ios::beg); + if (!ifs.good()) { + LOGE("[Compilation] Fail to set the position of the next character to be extracted" + "from the cache model stream."); + ifs.close(); + return OH_NN_FAILED; + } + + char* ptr = static_cast(m_device->AllocateBuffer(fsize)); + if (ptr == nullptr) { + LOGE("[Compilation] Fail to create file buffer."); + ifs.close(); + return OH_NN_NULL_PTR; + } + + ifs.read(ptr, fsize); + if (!ifs.good()) { + LOGE("[Compilation] Fail to read the characters from the cache model stream."); + ifs.close(); + m_device->ReleaseBuffer(ptr); + ptr = nullptr; + return OH_NN_FAILED; + } + + ifs.close(); + modelBuffer.buffer = ptr; + 
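+    // ptr was obtained from m_device->AllocateBuffer(); callers that fail after this point release it
+    // through Device::ReleaseBuffer().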
modelBuffer.length = fsize; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::CheckCacheInfo(ModelCacheInfo& modelCacheInfo, const std::string& cacheInfoPath) const +{ + std::ifstream infoCacheFile(cacheInfoPath.c_str(), std::ios::in | std::ios::binary); + if (!infoCacheFile) { + LOGE("[Compilation] Openning cache info file failed."); + return OH_NN_INVALID_FILE; + } + + int charNumber = NUMBER_CACHE_INFO_MEMBERS * sizeof(uint64_t); + if (!infoCacheFile.read((char*)&(modelCacheInfo), charNumber)) { + LOGE("[Compilation] Fail to get the content of info cache file."); + infoCacheFile.close(); + return OH_NN_INVALID_FILE; + } + + // modelCacheInfo.deviceId type is int64_t, + // it is transformed from size_t value, so the transform here will not truncate value. + size_t deviceId = static_cast(modelCacheInfo.deviceId); + if (deviceId != m_deviceId) { + LOGE("[Compilation] The deviceId=%zu in the cache files is different from current deviceId=%zu," + "please change the cache directory or current deviceId.", deviceId, m_deviceId); + infoCacheFile.close(); + return OH_NN_INVALID_PARAMETER; + } + + std::vector modelCheckSum; + modelCheckSum.resize(modelCacheInfo.fileNumber); + modelCacheInfo.modelCheckSum.resize(modelCacheInfo.fileNumber); + if (!infoCacheFile.read((char*)&modelCheckSum[0], modelCacheInfo.fileNumber * sizeof(uint64_t))) { + LOGE("[Compilation] The info cache file has been changed."); + infoCacheFile.close(); + return OH_NN_INVALID_FILE; + } + + for (uint32_t i = 0; i < modelCacheInfo.fileNumber; ++i) { + modelCacheInfo.modelCheckSum[i] = static_cast(modelCheckSum[i]); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::RemoveCacheFiles(uint32_t fileNumber) const +{ + std::string cacheInfoPath = m_cachePath + "cache_info.nncache"; + if (remove(cacheInfoPath.c_str()) == -1) { + LOGE("[Compilation] Fail to remove the file %s, please delete the file manually.", cacheInfoPath.c_str()); + return OH_NN_FAILED; + } + LOGI("[Compilation] Succeed to remove the file cache_info.nncach."); + + for (uint32_t i = 0; i < fileNumber; ++i) { + std::string fileName = std::to_string(i) + ".nncache"; + std::string cacheModelPath = m_cachePath + fileName; + if (access(cacheModelPath.c_str(), 0) != 0) { + LOGW("[Compilation] The file %s does not exist, no need to delete the file.", cacheModelPath.c_str()); + continue; + } + + if (remove(cacheModelPath.c_str()) == -1) { + LOGE("[Compilation] Fail to remove the file %s, please delete the file manually.", cacheModelPath.c_str()); + return OH_NN_FAILED; + } + LOGI("[Compilation] Succeed to remove the file %s", cacheModelPath.c_str()); + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::CheckCacheModel(const ModelCacheInfo& modelCacheInfo, + std::vector& modelBuffers) const +{ + for (uint32_t i = 0; i < modelCacheInfo.fileNumber; ++i) { + std::string cacheModelPath = m_cachePath + std::to_string(i) + ".nncache"; + if (access(cacheModelPath.c_str(), 0) != 0) { + LOGE("[Compilation] The cache model file %s does not exist.", cacheModelPath.c_str()); + return OH_NN_INVALID_FILE; + } + + ModelBuffer modelBuffer; + OH_NN_ReturnCode ret = ReadCacheModelFile(cacheModelPath, modelBuffer); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Read cache model file failed."); + return ret; + } + + if (GetCrc16(static_cast(modelBuffer.buffer), + modelBuffer.length) != modelCacheInfo.modelCheckSum[i]) { + LOGE("[Compilation] The cache model file %s has been changed.", cacheModelPath.c_str()); + return OH_NN_INVALID_FILE; + } + + 
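+        // Buffers collected here are released by LoadCacheBuild via Device::ReleaseBuffer if any later step fails.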
modelBuffers.emplace_back(std::move(modelBuffer)); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::NormalBuild(std::shared_ptr& preparedModel) +{ + ModelConfig config {m_enableFp16, m_performance, m_priority}; + OH_NN_ReturnCode ret = m_device->PrepareModel(m_liteGraph, config, preparedModel); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Preparing model failed when normally building."); + return ret; + } + + m_executionPlan = CreateSharedPtr(preparedModel, m_device); + if (m_executionPlan == nullptr) { + LOGE("Fail to create ExecutionPlan instance."); + return OH_NN_MEMORY_ERROR; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::GenCacheBuild(std::shared_ptr& preparedModel) +{ + OH_NN_ReturnCode ret = NormalBuild(preparedModel); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Preparing model failed when generating cache."); + return ret; + } + + std::vector modelBuffers; + ret = preparedModel->ExportModelCache(modelBuffers); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Export model cache failed."); + return ret; + } + + ret = GenerateCacheFiles(modelBuffers); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Generate cache files failed."); + return ret; + } + + LOGI("[Compilation] Export model cache successfully."); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::ReGenCacheBuild(uint32_t fileNumber, std::shared_ptr& preparedModel) +{ + OH_NN_ReturnCode ret = RemoveCacheFiles(fileNumber); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + ret = GenCacheBuild(preparedModel); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Generating cache building failed."); + return ret; + } + + LOGI("[Compilation] Update model cache successfully."); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::LoadCacheBuild(std::shared_ptr& preparedModel, + const ModelCacheInfo& cacheInfo) +{ + std::vector modelBuffers; + OH_NN_ReturnCode ret = CheckCacheModel(cacheInfo, modelBuffers); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Checking cache model failed."); + for (size_t i = 0; i < modelBuffers.size(); ++i) { + m_device->ReleaseBuffer(modelBuffers[i].buffer); + modelBuffers[i].buffer = nullptr; + modelBuffers[i].length = 0; + } + return ret; + } + + ModelConfig config {m_enableFp16, m_performance, m_priority}; + ret = m_device->PrepareModelFromModelCache(modelBuffers, config, preparedModel); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Preparing model from cache failed."); + return ret; + } + + LOGI("[Compilation] Load cache successfully."); + + m_executionPlan = CreateSharedPtr(preparedModel, m_device); + if (m_executionPlan == nullptr) { + LOGE("Fail to create ExecutionPlan instance."); + return OH_NN_MEMORY_ERROR; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::InnerBuild() +{ + OH_NN_ReturnCode ret; + std::shared_ptr preparedModel; + if (m_cachePath.empty()) { + ret = NormalBuild(preparedModel); + if (ret != OH_NN_SUCCESS) { + LOGE("Fail to normally build."); + return ret; + } + + m_isBuild = true; + return OH_NN_SUCCESS; + } + + std::string cacheInfoPath = m_cachePath + "cache_info.nncache"; + if (access(cacheInfoPath.c_str(), 0) != 0) { + ret = GenCacheBuild(preparedModel); + if (ret != OH_NN_SUCCESS) { + LOGE("Fail to build in generating cache mode."); + return ret; + } + + m_isBuild = true; + return OH_NN_SUCCESS; + } + + ModelCacheInfo cacheInfo; + ret = CheckCacheInfo(cacheInfo, cacheInfoPath); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + if (m_version > cacheInfo.version) { + ret = 
ReGenCacheBuild(cacheInfo.fileNumber, preparedModel); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + m_isBuild = true; + return OH_NN_SUCCESS; + } + + if (m_version < cacheInfo.version) { + LOGE("[Compilation] The current version is lower than the cache files, please set a higher version."); + return OH_NN_OPERATION_FORBIDDEN; + } + + ret = LoadCacheBuild(preparedModel, cacheInfo); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + m_isBuild = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::Build() +{ + NNRT_TRACE_NAME("Compilation"); + if (m_isBuild) { + LOGE("[Compilation] Cannot enable float16 after compilation finish."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (m_device == nullptr) { + LOGE("The parameter of m_device is nullptr, please call SetDevice function before build model."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode ret = InnerBuild(); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + return OH_NN_SUCCESS; +} + +std::shared_ptr Compilation::GetExecutionPlan() const +{ + return m_executionPlan; +} + +std::vector> Compilation::GetInputTensors() const +{ + return m_inputTensors; +} + +std::vector> Compilation::GetOutputTensors() const +{ + return m_outputTensors; +} + +bool Compilation::IsBuild() const +{ + return m_isBuild; +} + +bool Compilation::IsDynamicShape() const +{ + for (size_t i = 0; i < m_inputTensors.size(); ++i) { + if (m_inputTensors[i]->IsDynamicShape()) { + return true; + } + } + return false; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/compilation.h b/frameworks/native/compilation.h new file mode 100644 index 0000000000000000000000000000000000000000..0c76ef3cf49965d4cfce7e8aa1793c2ba5ea6101 --- /dev/null +++ b/frameworks/native/compilation.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef NEURAL_NETWORK_RUNTIME_COMPILATION_H +#define NEURAL_NETWORK_RUNTIME_COMPILATION_H + +#include "inner_model.h" +#include "execution_plan.h" + +#include "interfaces/oem/cpp_api/device.h" +#include "interfaces/oem/cpp_api/cpp_type.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +struct ModelCacheInfo { + uint64_t fileNumber = 0; + uint64_t version = 0; + uint64_t deviceId = 0; + std::vector modelCheckSum; +}; + +class Compilation { +public: + explicit Compilation(const InnerModel* innerModel); + + OH_NN_ReturnCode SetDevice(size_t deviceId); + OH_NN_ReturnCode SetCacheDir(const std::string& cacheModelPath, uint32_t version); + OH_NN_ReturnCode SetPerformance(OH_NN_PerformanceMode performance); + OH_NN_ReturnCode SetPriority(OH_NN_Priority priority); + OH_NN_ReturnCode SetEnableFp16(bool isFp16); + + OH_NN_ReturnCode Build(); + + bool IsBuild() const; + bool IsDynamicShape() const; + std::vector>GetInputTensors() const; + std::vector>GetOutputTensors() const; + std::shared_ptr GetExecutionPlan() const; + +private: + std::shared_ptr m_liteGraph {nullptr}; + OH_NN_Priority m_priority {OH_NN_PRIORITY_NONE}; + OH_NN_PerformanceMode m_performance {OH_NN_PERFORMANCE_NONE}; + bool m_enableFp16 {false}; + std::shared_ptr m_device {nullptr}; + std::string m_cachePath; + uint32_t m_version {0}; + size_t m_deviceId {0}; + bool m_isBuild {false}; + std::shared_ptr m_executionPlan {nullptr}; + std::vector> m_inputTensors; + std::vector> m_outputTensors; + +private: + OH_NN_ReturnCode GenerateCacheFiles(const std::vector& modelBuffer) const; + OH_NN_ReturnCode GenerateCacheModel(size_t cacheNumber, std::unique_ptr& cacheInfo, + std::vector modelBuffer) const; + OH_NN_ReturnCode GenerateCacheInfo(uint32_t cacheSize, std::unique_ptr& cacheInfo) const; + OH_NN_ReturnCode CheckCacheInfo(ModelCacheInfo& modelCacheInfo, const std::string& cacheInfoPath) const; + OH_NN_ReturnCode ReadCacheModelFile(const std::string& file, ModelBuffer& modelBuffer) const; + OH_NN_ReturnCode RemoveCacheFiles(uint32_t fileNumber) const; + unsigned short GetCrc16(const unsigned char* buffer, size_t length) const; + OH_NN_ReturnCode CheckCacheModel(const ModelCacheInfo& modelCacheInfo, + std::vector& modelBuffers) const; + OH_NN_ReturnCode NormalBuild(std::shared_ptr& preparedModel); + OH_NN_ReturnCode GenCacheBuild(std::shared_ptr& preparedModel); + OH_NN_ReturnCode ReGenCacheBuild(uint32_t fileNumber, std::shared_ptr& preparedModel); + OH_NN_ReturnCode LoadCacheBuild(std::shared_ptr& preparedModel, const ModelCacheInfo& cacheInfo); + OH_NN_ReturnCode InnerBuild(); + OH_NN_ReturnCode GetCacheFileLength(std::ifstream& ifs, int& fsize) const; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_COMPILATION_H \ No newline at end of file diff --git a/frameworks/native/device_manager.cpp b/frameworks/native/device_manager.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6ad79bbc5ffe954305079386c3e6c42c1dcacd85 --- /dev/null +++ b/frameworks/native/device_manager.cpp @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "device_manager.h" + +#include "hdi_interfaces.h" +#include "hdi_device.h" +#include "common/log.h" +#include "common/utils.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +const std::vector& DeviceManager::GetAllDeviceId() +{ + m_tmpDeviceIds.clear(); + std::shared_ptr device {nullptr}; + for (auto iter = m_devices.begin(); iter != m_devices.end(); ++iter) { + device = iter->second; + if (!IsValidDevice(device)) { + continue; + } + m_tmpDeviceIds.emplace_back(iter->first); + } + return m_tmpDeviceIds; +} + +std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const +{ + auto iter = m_devices.find(deviceId); + if (iter == m_devices.end()) { + LOGE("DeviceId is not found, deviceId=%zu", deviceId); + return nullptr; + } + + return iter->second; +} + +const std::string& DeviceManager::GetDeviceName(size_t deviceId) +{ + m_tmpDeviceName.clear(); + auto iter = m_devices.find(deviceId); + if (iter == m_devices.end()) { + LOGE("DeviceId is not found, deviceId=%zu", deviceId); + return m_tmpDeviceName; + } + + std::string deviceName; + auto ret = iter->second->GetDeviceName(deviceName); + if (ret != OH_NN_SUCCESS) { + LOGE("Get device name failed."); + return m_tmpDeviceName; + } + + std::string vendorName; + ret = iter->second->GetVendorName(vendorName); + if (ret != OH_NN_SUCCESS) { + LOGE("Get vendor name failed."); + return m_tmpDeviceName; + } + + m_tmpDeviceName = GenUniqueName(deviceName, vendorName); + return m_tmpDeviceName; +} + +std::string DeviceManager::GenUniqueName(const std::string& deviceName, const std::string& vendorName) const +{ + return deviceName + "_" + vendorName; +} + +OH_NN_ReturnCode DeviceManager::RegisterDevice(std::function()> creator) +{ + auto regDevice = creator(); + if (regDevice == nullptr) { + LOGE("Cannot create device, register device failed."); + return OH_NN_INVALID_PARAMETER; + } + + if (!IsValidDevice(regDevice)) { + LOGE("Device is not avaliable."); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + std::string deviceName; + auto ret = regDevice->GetDeviceName(deviceName); + if (ret != OH_NN_SUCCESS) { + LOGE("Get device name failed."); + return ret; + } + + std::string vendorName; + ret = regDevice->GetVendorName(vendorName); + if (ret != OH_NN_SUCCESS) { + LOGE("Get vendor name failed."); + return ret; + } + + const std::lock_guard lock(m_mtx); + std::string uniqueName = GenUniqueName(deviceName, vendorName); + auto setResult = m_uniqueName.emplace(uniqueName); + if (!setResult.second) { + LOGE("Device already exists, cannot register again. deviceName=%s, vendorName=%s", + deviceName.c_str(), vendorName.c_str()); + return OH_NN_FAILED; + } + + m_devices.emplace(std::hash{}(uniqueName), regDevice); + return OH_NN_SUCCESS; +} + +void DeviceManager::DiscoverHDIDevices() +{ + // only one device from HDI now. + OHOS::sptr iDevice = V1_0::INnrtDevice::Get(); + if (iDevice == nullptr) { + LOGW("Get HDI device failed."); + return; + } + + std::string deviceName; + std::string vendorName; + auto hdiRet = iDevice->GetDeviceName(deviceName); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get device name failed. 
ErrorCode=%d", hdiRet); + return; + } + hdiRet = iDevice->GetVendorName(vendorName); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get vendor name failed. ErrorCode=%d", hdiRet); + return; + } + + std::string uniqueName = GenUniqueName(deviceName, vendorName); + const std::lock_guard lock(m_mtx); + auto setResult = m_uniqueName.emplace(uniqueName); + if (!setResult.second) { + LOGW("Device already exists, cannot register again. deviceName=%s, vendorName=%s", + deviceName.c_str(), vendorName.c_str()); + return; + } + + std::shared_ptr device = CreateSharedPtr(iDevice); + if (device == nullptr) { + LOGW("Failed to register device, because fail to create device instance."); + return; + } + m_devices.emplace(std::hash{}(uniqueName), device); +} + +bool DeviceManager::IsValidDevice(std::shared_ptr device) const +{ + DeviceStatus status {DeviceStatus::UNKNOWN}; + auto ret = device->GetDeviceStatus(status); + if (ret != OH_NN_SUCCESS || status == DeviceStatus::UNKNOWN || status == DeviceStatus::OFFLINE) { + return false; + } + return true; +} +} // NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/device_manager.h b/frameworks/native/device_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..1f15c369a6fe8e62fe752cb574e367de02005a8c --- /dev/null +++ b/frameworks/native/device_manager.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_DEVICE_MANAGER_H +#define NEURAL_NETWORK_RUNTIME_DEVICE_MANAGER_H + +#include +#include +#include +#include +#include +#include + +#include "interfaces/oem/cpp_api/device.h" +#include "interfaces/kits/c/neural_network_runtime_type.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +class DeviceManager { +public: + const std::vector& GetAllDeviceId(); + std::shared_ptr GetDevice(size_t deviceId) const; + const std::string& GetDeviceName(size_t deviceId); + + // register device from C++ API + OH_NN_ReturnCode RegisterDevice(std::function()> creator); + + static DeviceManager& GetInstance() + { + static DeviceManager instance; + instance.DiscoverHDIDevices(); + return instance; + } + +private: + DeviceManager() = default; + DeviceManager(const DeviceManager&) = delete; + DeviceManager& operator=(const DeviceManager&) = delete; + + void DiscoverHDIDevices(); + std::string GenUniqueName(const std::string& deviceName, const std::string& vendorName) const; + bool IsValidDevice(std::shared_ptr device) const; + +private: + std::unordered_set m_uniqueName; + // key is device id, it is the unique number. 
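+ // The device id is produced by applying std::hash to the unique name "deviceName_vendorName"; see GenUniqueName() and RegisterDevice().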
+ std::unordered_map> m_devices; + std::mutex m_mtx; + + std::string m_tmpDeviceName; + std::vector m_tmpDeviceIds; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_DEVICE_MANAGER_H \ No newline at end of file diff --git a/frameworks/native/device_registrar.cpp b/frameworks/native/device_registrar.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3d50ef47779f12c581e4373bb04659f3d9c59ed8 --- /dev/null +++ b/frameworks/native/device_registrar.cpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "interfaces/oem/cpp_api/device_registrar.h" + +#include "device_manager.h" +#include "common/log.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +DeviceRegistrar::DeviceRegistrar(const CreateDevice creator) +{ + auto& deviceManager = DeviceManager::GetInstance(); + auto ret = deviceManager.RegisterDevice(creator); + if (ret != OH_NN_SUCCESS) { + LOGW("Register device failed. ErrorCode=%d", ret); + } +} +} // NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/execution_plan.cpp b/frameworks/native/execution_plan.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e9a7c72b3dad8ecb1252970d66d55c5ec2c258d9 --- /dev/null +++ b/frameworks/native/execution_plan.cpp @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "execution_plan.h" + +#include + +#include "common/log.h" +#include "interfaces/oem/cpp_api/cpp_type.h" + + +namespace OHOS { +namespace NeuralNetworkRuntime { +OH_NN_ReturnCode ExecutionPlan::Run(const std::vector>& inputTensors, + std::vector>& outputTensors) +{ + OH_NN_ReturnCode ret {OH_NN_FAILED}; + IOTensor tensor; + std::vector inputIOTensors; + size_t inputSize = inputTensors.size(); + size_t outputSize = outputTensors.size(); + for (size_t i = 0; i < inputSize; ++i) { + inputTensors[i]->ConvertToIOTensor(tensor); + inputIOTensors.emplace_back(std::move(tensor)); + } + + std::vector outputIOTensors; + for (size_t i = 0; i < outputSize; ++i) { + outputTensors[i]->ConvertToIOTensor(tensor); + outputIOTensors.emplace_back(std::move(tensor)); + } + + std::vector> outputsDims; + std::vector isSufficientDataBuffer; + ret = m_preparedModel->Run(inputIOTensors, outputIOTensors, outputsDims, isSufficientDataBuffer); + if (ret != OH_NN_SUCCESS) { + LOGE("PrepardModel Run() failed."); + return ret; + } + + // Check if the output buffer is sufficient + bool bufferFailed {false}; + for (size_t i = 0; i < outputSize; ++i) { + if (!isSufficientDataBuffer[i]) { + // Print all output indices with insufficient buffer, don't return until traversing all outputs. + LOGE("Run failed, Output %zu does not have enough buffer to store the data.", i); + bufferFailed = true; + } + } + if (bufferFailed) { + return OH_NN_FAILED; + } + + // Set the output NNTensor's dimensions from output IOTensor if it is dynamic. + // NNTensor::SetDimensions will check if the tensor buffer is enough for the new dimensions. + for (size_t i = 0; i < outputSize; ++i) { + ret = outputTensors[i]->SetDimensions(outputsDims[i]); + if (ret != OH_NN_SUCCESS) { + LOGE("Run failed, error happened when setting output tensor's dimensions, output id: %zu.", i); + return ret; + } + } + + return OH_NN_SUCCESS; +} + + +std::shared_ptr ExecutionPlan::GetInputDevice() const +{ + return m_device; +} + + +std::shared_ptr ExecutionPlan::GetOutputDevice() const +{ + return m_device; +} +} // NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/execution_plan.h b/frameworks/native/execution_plan.h new file mode 100644 index 0000000000000000000000000000000000000000..6a6b254660fe5bef17907a41862ba8d6949ed1b0 --- /dev/null +++ b/frameworks/native/execution_plan.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_EXECUTION_PLAN_H +#define NEURAL_NETWORK_RUNTIME_EXECUTION_PLAN_H + +#include "frameworks/native/nn_tensor.h" +#include "interfaces/kits/c/neural_network_runtime_type.h" +#include "interfaces/oem/cpp_api/prepared_model.h" +#include "interfaces/oem/cpp_api/device.h" + + +namespace OHOS { +namespace NeuralNetworkRuntime { +class ExecutionPlan { +public: + ExecutionPlan(std::shared_ptr preparedModel, std::shared_ptr device) + : m_preparedModel(preparedModel), + m_device(device) {}; + + OH_NN_ReturnCode Run(const std::vector>& inputTensors, + std::vector>& outputTensors); + + std::shared_ptr GetInputDevice() const; + std::shared_ptr GetOutputDevice() const; + +private: + std::shared_ptr m_preparedModel {nullptr}; + std::shared_ptr m_device {nullptr}; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif \ No newline at end of file diff --git a/frameworks/native/executor.cpp b/frameworks/native/executor.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f99d28cdb1869484968217fb5b03c927e6e7b3f7 --- /dev/null +++ b/frameworks/native/executor.cpp @@ -0,0 +1,555 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "executor.h" + +#include "securec.h" + +#include "common/utils.h" +#include "common/scoped_trace.h" + + +namespace OHOS { +namespace NeuralNetworkRuntime { +Executor::Executor(const Compilation* compilation) + : m_modelInputs(compilation->GetInputTensors()), + m_modelOutputs(compilation->GetOutputTensors()), + m_executionPlan(compilation->GetExecutionPlan()) {} + +OH_NN_ReturnCode Executor::BuildInputTensor(uint32_t index, const OH_NN_Tensor& nnTensor, + std::shared_ptr inputTensor) const +{ + // Note: inputs have only shapes info. + if (index >= m_modelInputs.size()) { + LOGE("BuildInputTensor failed, input index is out of range."); + return OH_NN_INVALID_PARAMETER; + } + + // Build a tensor from nnTensor. 
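+ // The checks below reject input tensors with dynamic (-1) dimensions and tensors whose attributes differ from the corresponding model input.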
+ auto ret = inputTensor->BuildFromOHNNTensor(nnTensor); + if (ret != OH_NN_SUCCESS) { + LOGE("BuildInputTensor failed, please check input nnTensor."); + return ret; + } + + if (inputTensor->IsDynamicShape()) { + LOGE("BuildInputTensor failed, input nnTensor should have certain dimensions which cannot contain -1."); + return OH_NN_INVALID_PARAMETER; + } + + if (!m_modelInputs[index]->CompareAttribute(*inputTensor)) { + LOGE("BuildInputTensor failed, input has different attributes from the one in the constructed model."); + return OH_NN_INVALID_PARAMETER; + } + + inputTensor->SetName(m_modelInputs[index]->GetName()); + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::SetInputTensorWithCurrentBuffer(uint32_t index, + std::shared_ptr<NNTensor> inputTensor, + const void* buffer, + size_t dataLength, + size_t curBufferLength) +{ + void* curBuffer = m_inputTensors[index].tensor->GetBuffer(); + errno_t status = memcpy_s(curBuffer, dataLength, buffer, dataLength); + // Current buffer inside m_inputTensors is managed by executor, no need to release if memcpy failed. + if (status != EOK) { + LOGE("SetInputTensorWithCurrentBuffer failed, copy data from user buffer to device buffer failed. " + "Error code: %d.", status); + return OH_NN_MEMORY_ERROR; + } + + // Set the new tensor with the buffer of the current tensor + inputTensor->SetBuffer(curBuffer, curBufferLength); + + // The memory is reused here. Thus, the current tensor's buffer must be set to nullptr, in case the memory is released + // twice. + m_inputTensors[index].tensor->SetBuffer(nullptr, 0); + + // Set to the new tensor, and release the current one. + m_inputTensors[index].tensor = inputTensor; + return OH_NN_SUCCESS; +} + + +void Executor::SetInputTensorWithNewBuffer(uint32_t index, + std::shared_ptr<NNTensor> inputTensor, + const void* inputBuffer, + size_t length, + bool isInnerMem) +{ + // Release the memory inside the tensor first, if it is allocated by Executor during SetInput(). + if (m_inputTensors.find(index) != m_inputTensors.end()) { + if (m_inputTensors[index].isInnerMem) { + void* curBuffer = m_inputTensors[index].tensor->GetBuffer(); + std::shared_ptr<Device> inputDevice = m_executionPlan->GetInputDevice(); + inputDevice->ReleaseBuffer(curBuffer); + } + // Set the current tensor's buffer to nullptr in case the NNTensor releases the driver memory on destruction. + m_inputTensors[index].tensor->SetBuffer(nullptr, 0); + } + + // Set new input tensor data buffer + inputTensor->SetBuffer(inputBuffer, length); + + // Create or update the input tensor + ExeTensor exeTensor{inputTensor, nullptr, 0, isInnerMem}; + m_inputTensors[index] = exeTensor; +} + + +OH_NN_ReturnCode Executor::SetInput(uint32_t index, const OH_NN_Tensor& nnTensor, const void* buffer, size_t length) +{ + std::shared_ptr<NNTensor> inputTensor = CreateSharedPtr<NNTensor>(); + if (inputTensor == nullptr) { + LOGE("SetInput failed, error happened when creating NNTensor."); + return OH_NN_MEMORY_ERROR; + } + + auto ret = BuildInputTensor(index, nnTensor, inputTensor); + if (ret != OH_NN_SUCCESS) { + LOGE("SetInput failed, please check input index or nnTensor."); + return ret; + } + + // dataLength will be larger than 0 after BuildInputTensor() + size_t dataLength = inputTensor->GetDataLength(); + if (length == 0 || length < dataLength) { + LOGE("SetInput failed, the given buffer length is too small to store the input nnTensor data."); + return OH_NN_INVALID_PARAMETER; + } + + // Get the length of the current buffer if it was allocated by SetInput() before.
+ size_t curBufferLength = 0; + if ((m_inputTensors.find(index) != m_inputTensors.end()) && (m_inputTensors[index].isInnerMem)) { + curBufferLength = m_inputTensors[index].tensor->GetBufferLength(); + } + + // (dataLength <= curBufferLength) returns true if and only if current buffer is allocated by SetInput() before + // and is larger than user buffer. + if (dataLength <= curBufferLength) { + ret = SetInputTensorWithCurrentBuffer(index, inputTensor, buffer, dataLength, curBufferLength); + if (ret != OH_NN_SUCCESS) { + LOGE("SetInput failed, error happened when setting input with current buffer."); + return ret; + } + m_isRun = false; + return OH_NN_SUCCESS; + } + + /** + * Buffer needs to allocated or reallocated if: + * + * - Current buffer is not enough. + * - SetInput() has not been called for the input before. + * - The buffer held in m_inputTensors is allocated and set by CreateInputMemory() and SetInputFromMemory(). + */ + std::shared_ptr inputDevice = m_executionPlan->GetInputDevice(); + void* inputBuffer = inputDevice->AllocateBuffer(length); + if (inputBuffer == nullptr) { + LOGE("SetInput failed, error happened when allocating input device buffer."); + return OH_NN_MEMORY_ERROR; + } + + errno_t status = memcpy_s(inputBuffer, dataLength, buffer, dataLength); + if (status != EOK) { + LOGE("SetInput failed, copy data from user buffer failed. Error code: %d.", status); + inputDevice->ReleaseBuffer(inputBuffer); + return OH_NN_MEMORY_ERROR; + } + + SetInputTensorWithNewBuffer(index, inputTensor, inputBuffer, length, true); + m_isRun = false; + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::SetInputFromMemory(uint32_t index, const OH_NN_Tensor& nnTensor, const OH_NN_Memory& memory) +{ + // Build a input tensor + std::shared_ptr inputTensor = CreateSharedPtr(); + if (inputTensor == nullptr) { + LOGE("SetInputFromMemory failed, error happened when creating NNTensor."); + return OH_NN_MEMORY_ERROR; + } + + auto ret = BuildInputTensor(index, nnTensor, inputTensor); + if (ret != OH_NN_SUCCESS) { + LOGE("SetInputFromMemory failed, please check input index or nnTensor"); + return ret; + } + + // check data length + size_t dataLength = inputTensor->GetDataLength(); + if (memory.length == 0 || memory.length < dataLength) { + LOGE("SetInputFromMemory failed," + " the length in the given memory is too small to store the input nnTensor data."); + return OH_NN_INVALID_PARAMETER; + } + + SetInputTensorWithNewBuffer(index, inputTensor, const_cast(memory.data), memory.length, false); + m_isRun = false; + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::SetOutput(uint32_t index, void* buffer, size_t length) +{ + if (index >= m_modelOutputs.size()) { + LOGE("SetOutput failed, output index is out of range."); + return OH_NN_INVALID_PARAMETER; + } + + size_t dataLength = m_modelOutputs[index]->GetDataLength(); + if (length == 0 || length < dataLength) { + LOGE("SetOutput failed, the given buffer length is too small to store the output tensor data."); + return OH_NN_INVALID_PARAMETER; + } + + // If output tensor does not exist, or inner device buffer size is not enough, + // or device buffer is set by SetOutputFromMemory() before, + // allocate a new device buffer and set it to output tensor, and update the user buffer. 
+ std::shared_ptr outputDevice = m_executionPlan->GetOutputDevice(); + if (m_outputTensors.find(index) != m_outputTensors.end()) { + if (m_outputTensors[index].isInnerMem) { + size_t curBufferLength = m_outputTensors[index].tensor->GetBufferLength(); + if (length <= curBufferLength) { + // If current device buffer size is enough, only update the user buffer. + m_outputTensors[index].userBuffer = buffer; + m_outputTensors[index].userBufferLength = length; + m_isRun = false; + return OH_NN_SUCCESS; + } else { + // If current device buffer size is not enough, + // release current device buffer and then allocate a new one below. + void* curBuffer = m_outputTensors[index].tensor->GetBuffer(); + outputDevice->ReleaseBuffer(curBuffer); + } + } + } else { + // If output tensor does not exist, create a new null output tensor. + ExeTensor exeTensor; + m_outputTensors[index] = exeTensor; + m_outputTensors[index].tensor = m_modelOutputs[index]; + } + + void* deviceOutputBuffer = outputDevice->AllocateBuffer(length); + if (deviceOutputBuffer == nullptr) { + LOGE("SetOutput failed, allocating output device buffer failed."); + return OH_NN_MEMORY_ERROR; + } + + m_outputTensors[index].tensor->SetBuffer(deviceOutputBuffer, length); + m_outputTensors[index].userBuffer = buffer; + m_outputTensors[index].userBufferLength = length; + m_outputTensors[index].isInnerMem = true; + m_isRun = false; + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::SetOutputFromMemory(uint32_t index, const OH_NN_Memory& memory) +{ + if (index >= m_modelOutputs.size()) { + LOGE("SetOutputFromMemory failed, output index is out of range."); + return OH_NN_INVALID_PARAMETER; + } + + size_t dataLength = m_modelOutputs[index]->GetDataLength(); + if (memory.length == 0 || memory.length < dataLength) { + LOGE("SetOutputFromMemory failed, the memory is too small to store the output tensor data."); + return OH_NN_INVALID_PARAMETER; + } + + if (m_outputTensors.find(index) != m_outputTensors.end()) { + if (m_outputTensors[index].isInnerMem) { + // If it is inner buffer, releate it + void* curBuffer = m_outputTensors[index].tensor->GetBuffer(); + std::shared_ptr outputDevice = m_executionPlan->GetOutputDevice(); + outputDevice->ReleaseBuffer(curBuffer); + } + } else { + // If output tensor does not exist, create a new null output tensor. + ExeTensor exeTensor; + m_outputTensors[index] = exeTensor; + m_outputTensors[index].tensor = m_modelOutputs[index]; + } + + // Set the output tensor with memory + m_outputTensors[index].tensor->SetBuffer(const_cast(memory.data), memory.length); + m_outputTensors[index].userBuffer = nullptr; + m_outputTensors[index].userBufferLength = 0; + m_outputTensors[index].isInnerMem = false; + m_isRun = false; + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::GetOutputShape(uint32_t index, int32_t** dimensions, uint32_t& dimensionCount) +{ + if (!m_isRun) { + LOGE("GetOutputShape failed, cannot get output dimensions before Run."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (index >= m_modelOutputs.size()) { + LOGE("GetOutputShape failed, output index is out of range."); + return OH_NN_INVALID_PARAMETER; + } + + if (m_outputTensors.find(index) == m_outputTensors.end()) { + LOGE("GetOutputShape failed, output has not been set. 
Output index: %u.", index); + return OH_NN_INVALID_PARAMETER; + } + + m_outputDimensions[index] = m_outputTensors[index].tensor->GetDimensions(); + *dimensions = m_outputDimensions[index].data(); + dimensionCount = m_outputDimensions[index].size(); + + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::CreateInputMemory(uint32_t index, size_t length, OH_NN_Memory** memory) +{ + if (index >= m_modelInputs.size()) { + LOGE("CreateInputMemory failed, input index is out of range."); + return OH_NN_INVALID_PARAMETER; + } + + // Allocate device buffer + std::shared_ptr inputDevice = m_executionPlan->GetInputDevice(); + void* deviceInputBuffer = inputDevice->AllocateBuffer(length); + if (deviceInputBuffer == nullptr) { + LOGE("CreateInputMemory failed, allocating intput device buffer failed."); + return OH_NN_MEMORY_ERROR; + } + + *memory = new(std::nothrow) OH_NN_Memory{deviceInputBuffer, length}; + if (*memory == nullptr) { + LOGE("CreateInputMemory failed, constructing OH_NN_Memory failed."); + inputDevice->ReleaseBuffer(deviceInputBuffer); + return OH_NN_MEMORY_ERROR; + } + + // Save the buffer address for check when destroying it. + m_inputCreatedMem[index].emplace_back(deviceInputBuffer); + + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::DestroyInputMemory(uint32_t index, OH_NN_Memory** memory) +{ + if (index >= m_modelInputs.size()) { + LOGE("DestroyInputMemory failed, input index is out of range."); + return OH_NN_INVALID_PARAMETER; + } + + if (m_inputCreatedMem.find(index) == m_inputCreatedMem.end()) { + LOGE("DestroyInputMemory failed, the memory has not been created with the index."); + return OH_NN_INVALID_PARAMETER; + } + + std::vector& inputCreatedMem = m_inputCreatedMem[index]; + auto pos = std::find(inputCreatedMem.begin(), inputCreatedMem.end(), (*memory)->data); + if (pos == inputCreatedMem.end()) { + LOGE("DestroyInputMemory failed, the index does not match the memory."); + return OH_NN_INVALID_PARAMETER; + } + + std::shared_ptr inputDevice = m_executionPlan->GetInputDevice(); + auto ret = inputDevice->ReleaseBuffer((*memory)->data); + if (ret != OH_NN_SUCCESS) { + LOGE("Release input buffer failed."); + return ret; + } + + inputCreatedMem.erase(pos); + delete *memory; + *memory = nullptr; + + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::CreateOutputMemory(uint32_t index, size_t length, OH_NN_Memory** memory) +{ + if (index >= m_modelOutputs.size()) { + LOGE("CreateOutputMemory failed, output index is out of range."); + return OH_NN_INVALID_PARAMETER; + } + + // Allocate device buffer + std::shared_ptr outputDevice = m_executionPlan->GetOutputDevice(); + void* deviceOutputBuffer = outputDevice->AllocateBuffer(length); + if (deviceOutputBuffer == nullptr) { + LOGE("CreateOutputMemory failed, allocating output device buffer failed."); + return OH_NN_MEMORY_ERROR; + } + + *memory = new(std::nothrow) OH_NN_Memory{deviceOutputBuffer, length}; + if (*memory == nullptr) { + LOGE("CreateOutputMemory failed, constructing OH_NN_Memory failed."); + outputDevice->ReleaseBuffer(deviceOutputBuffer); + return OH_NN_MEMORY_ERROR; + } + + // Save the buffer address for check when destroying it. 
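+ // DestroyOutputMemory() later verifies the pointer against this list and refuses to release memory that was not created here.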
+ m_outputCreatedMem[index].emplace_back(deviceOutputBuffer); + + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::DestroyOutputMemory(uint32_t index, OH_NN_Memory** memory) +{ + if (index >= m_modelOutputs.size()) { + LOGE("DestroyOutputMemory failed, output index is out of range."); + return OH_NN_INVALID_PARAMETER; + } + + if (m_outputCreatedMem.find(index) == m_outputCreatedMem.end()) { + LOGE("DestroyOutputMemory failed, the memory has not been created with the index."); + return OH_NN_INVALID_PARAMETER; + } + + std::vector& outputCreatedMem = m_outputCreatedMem[index]; + auto pos = std::find(outputCreatedMem.begin(), outputCreatedMem.end(), (*memory)->data); + if (pos == outputCreatedMem.end()) { + LOGE("DestroyOutputMemory failed, the index does not match the memory."); + return OH_NN_INVALID_PARAMETER; + } + + std::shared_ptr outputDevice = m_executionPlan->GetOutputDevice(); + auto ret = outputDevice->ReleaseBuffer((*memory)->data); + if (ret != OH_NN_SUCCESS) { + LOGE("Release output buffer failed."); + return ret; + } + + outputCreatedMem.erase(pos); + delete *memory; + *memory = nullptr; + + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::Run() +{ + NNRT_TRACE_NAME("Execution"); + if (m_modelInputs.size() != m_inputTensors.size()) { + LOGE("Run failed, some input tensors have not been set."); + return OH_NN_INVALID_PARAMETER; + } + if (m_modelOutputs.size() != m_outputTensors.size()) { + LOGE("Run failed, some output tensors have not been set."); + return OH_NN_INVALID_PARAMETER; + } + + // Build the NNTensor pointer vector: inputTensors and outputTensors + std::vector> inputTensors; + std::vector> outputTensors; + size_t inputSize = m_inputTensors.size(); + size_t outputSize = m_outputTensors.size(); + for (size_t i = 0; i < inputSize; ++i) { + inputTensors.emplace_back(m_inputTensors[i].tensor); + } + for (size_t i = 0; i < outputSize; ++i) { + outputTensors.emplace_back(m_outputTensors[i].tensor); + } + + // Predict + auto ret = m_executionPlan->Run(inputTensors, outputTensors); + if (ret != OH_NN_SUCCESS) { + LOGE("Run failed, error happened when executing the inference."); + return ret; + } + + errno_t status{EOK}; + // Copy inner device buffer to user buffer if using SetOutput() + for (size_t i = 0; i < outputSize; ++i) { + if (m_outputTensors[i].isInnerMem) { + auto size = outputTensors[i]->GetDataLength(); + if (size > m_outputTensors[i].userBufferLength) { + LOGE("Output buffer size is not enough. Your size=%zu, but actual output size=%zu.", + m_outputTensors[i].userBufferLength, size); + return OH_NN_INVALID_PARAMETER; + } + + void* deviceBuffer = outputTensors[i]->GetBuffer(); + if (deviceBuffer == nullptr) { + LOGE("Output buffer is nullptr."); + return OH_NN_FAILED; + } + + status = memcpy_s(m_outputTensors[i].userBuffer, m_outputTensors[i].userBufferLength, deviceBuffer, size); + if (status != EOK) { + LOGE("Run failed, memory copy from device buffer to user buffer failed. 
Error code: %d.", status); + return OH_NN_MEMORY_ERROR; + } + } + } + + m_isRun = true; + return OH_NN_SUCCESS; +} + +Executor::~Executor() +{ + std::shared_ptr inputDevice; + for (auto& it : m_inputTensors) { + inputDevice = m_executionPlan->GetInputDevice(); + if ((it.second).isInnerMem) { + inputDevice->ReleaseBuffer((it.second).tensor->GetBuffer()); + } + (it.second).tensor->SetBuffer(nullptr, 0); + (it.second).tensor.reset(); + (it.second).userBuffer = nullptr; + } + m_inputTensors.clear(); + + std::shared_ptr outputDevice; + for (auto& it : m_outputTensors) { + outputDevice = m_executionPlan->GetOutputDevice(); + if ((it.second).isInnerMem) { + outputDevice->ReleaseBuffer((it.second).tensor->GetBuffer()); + } + (it.second).tensor->SetBuffer(nullptr, 0); + (it.second).tensor.reset(); + (it.second).userBuffer = nullptr; + } + m_outputTensors.clear(); + + for (auto& it : m_inputCreatedMem) { + it.second.clear(); + } + m_inputCreatedMem.clear(); + + for (auto& it : m_outputCreatedMem) { + it.second.clear(); + } + m_outputCreatedMem.clear(); + + m_outputDimensions.clear(); + m_modelInputs.clear(); + m_modelOutputs.clear(); +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/executor.h b/frameworks/native/executor.h new file mode 100644 index 0000000000000000000000000000000000000000..bbe3d933a53a2f1fbb15e88988ed135ae81c1415 --- /dev/null +++ b/frameworks/native/executor.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_EXECUTOR_H +#define NEURAL_NETWORK_RUNTIME_EXECUTOR_H + +#include "compilation.h" +#include "execution_plan.h" +#include "nn_tensor.h" +#include "interfaces/kits/c/neural_network_runtime.h" +#include "interfaces/oem/cpp_api/device.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +class Executor { +public: + explicit Executor(const Compilation* compilation); + ~Executor(); + + OH_NN_ReturnCode SetInput(uint32_t index, const OH_NN_Tensor& nnTensor, const void* buffer, size_t length); + OH_NN_ReturnCode SetInputFromMemory(uint32_t index, const OH_NN_Tensor& nnTensor, const OH_NN_Memory& memory); + OH_NN_ReturnCode SetOutput(uint32_t index, void* buffer, size_t length); + OH_NN_ReturnCode SetOutputFromMemory(uint32_t index, const OH_NN_Memory& memory); + OH_NN_ReturnCode GetOutputShape(uint32_t index, int32_t** dimensions, uint32_t& dimensionCount); + + OH_NN_ReturnCode CreateInputMemory(uint32_t index, size_t length, OH_NN_Memory** memory); + OH_NN_ReturnCode CreateOutputMemory(uint32_t index, size_t length, OH_NN_Memory** memory); + OH_NN_ReturnCode DestroyInputMemory(uint32_t index, OH_NN_Memory** memory); + OH_NN_ReturnCode DestroyOutputMemory(uint32_t index, OH_NN_Memory** memory); + + OH_NN_ReturnCode Run(); + +private: + OH_NN_ReturnCode BuildInputTensor(uint32_t index, const OH_NN_Tensor& nnTensor, + std::shared_ptr inputTensor) const; + OH_NN_ReturnCode SetInputTensorWithCurrentBuffer(uint32_t index, std::shared_ptr inputTensor, + const void* buffer, size_t dataLength, size_t curBufferLength); + void SetInputTensorWithNewBuffer(uint32_t index, std::shared_ptr inputTensor, + const void* inputBuffer, size_t length, bool isInnerMem); + +private: + struct ExeTensor { + std::shared_ptr tensor; + void* userBuffer; + size_t userBufferLength; + bool isInnerMem; + }; + bool m_isRun {false}; + std::vector> m_modelInputs; + std::vector> m_modelOutputs; + std::shared_ptr m_executionPlan {nullptr}; + std::unordered_map> m_outputDimensions; + std::unordered_map m_inputTensors; + std::unordered_map m_outputTensors; + std::unordered_map> m_inputCreatedMem; + std::unordered_map> m_outputCreatedMem; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif \ No newline at end of file diff --git a/frameworks/native/hdi_device.cpp b/frameworks/native/hdi_device.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6c8dd471c6f2623d80fb21c5e99c1cf7f759e2a6 --- /dev/null +++ b/frameworks/native/hdi_device.cpp @@ -0,0 +1,331 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "hdi_device.h" + +#include "hdf_base.h" +#include "mindir.h" + +#include "hdi_prepared_model.h" +#include "memory_manager.h" +#include "transform.h" +#include "common/log.h" +#include "common/utils.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +HDIDevice::HDIDevice(OHOS::sptr device) : m_iDevice(device) +{ + device->GetVersion(m_hdiVersion.first, m_hdiVersion.second); +} + +OH_NN_ReturnCode HDIDevice::GetDeviceName(std::string& name) +{ + auto ret = m_iDevice->GetDeviceName(name); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device name failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::GetVendorName(std::string& name) +{ + auto ret = m_iDevice->GetVendorName(name); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device vendor name failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::GetDeviceType(OH_NN_DeviceType& deviceType) +{ + V1_0::DeviceType iDeviceType; + auto ret = m_iDevice->GetDeviceType(iDeviceType); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device type failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + deviceType = HDIToNN::TransHDIDeviceType(iDeviceType); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::GetDeviceStatus(DeviceStatus& status) +{ + V1_0::DeviceStatus iDeviceStatus; + auto ret = m_iDevice->GetDeviceStatus(iDeviceStatus); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device status failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + status = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr model, + std::vector& ops) +{ + if (model == nullptr) { + LOGE("Model is nullptr, cannot query supported operation."); + return OH_NN_NULL_PTR; + } + + V1_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0}; + size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get()); + int32_t hdiRet {0}; + if (tensorSize > 0) { + hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { + LOGE("Allocate tensor buffer error when get supported operation. ErrorCode: %d", hdiRet); + return OH_NN_FAILED; + } + } + + auto iModel = mindspore::lite::MindIR_LiteGraph_To_Model(model.get(), tensorBuffer); + if (iModel == nullptr) { + LOGE("Parse litegraph to hdi model failed."); + ReleaseSharedBuffer(tensorBuffer); + return OH_NN_FAILED; + } + + hdiRet = m_iDevice->GetSupportedOperation(*iModel, ops); + + mindspore::lite::MindIR_Model_Destroy(&iModel); + auto ret = ReleaseSharedBuffer(tensorBuffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Release tensorBuffer failed."); + return OH_NN_FAILED; + } + if (hdiRet != HDF_SUCCESS) { + LOGE("Get supported operation failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsFloat16PrecisionSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsFloat16PrecisionSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query fp16 precision supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsPerformanceModeSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsPerformanceModeSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query performance mode supported failed. 
ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsPrioritySupported(bool& isSupported) +{ + auto ret = m_iDevice->IsPrioritySupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query priority supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsDynamicInputSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsDynamicInputSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query dynamic input supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsModelCacheSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query cache model supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + if (model == nullptr) { + LOGE("Model is nullptr, cannot prepare model."); + return OH_NN_INVALID_PARAMETER; + } + + V1_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0}; + size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get()); + int32_t hdiRet {0}; + if (tensorSize > 0) { + hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { + LOGE("Allocate tensor buffer error when prepare model. ErrorCode: %d", hdiRet); + return OH_NN_FAILED; + } + } + + V1_0::Model* iModel = mindspore::lite::MindIR_LiteGraph_To_Model(model.get(), tensorBuffer); + if (iModel == nullptr) { + LOGE("Parse litegraph to hdi model failed."); + ReleaseSharedBuffer(tensorBuffer); + return OH_NN_FAILED; + } + + V1_0::ModelConfig iModelConfig; + iModelConfig.enableFloat16 = config.enableFloat16; + iModelConfig.mode = NNToHDI::TransPerformanceMode(config.mode); + iModelConfig.priority = NNToHDI::TransPriority(config.priority); + OHOS::sptr iPreparedModel; + + auto preparedRet = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel); + + mindspore::lite::MindIR_Model_Destroy(&iModel); + auto ret = ReleaseSharedBuffer(tensorBuffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Release tensorBuffer failed."); + return OH_NN_FAILED; + } + if (preparedRet != HDF_SUCCESS || iPreparedModel == nullptr) { + LOGE("Prepare model failed. ErrorCode=%d", preparedRet); + return OH_NN_FAILED; + } + + preparedModel = CreateSharedPtr(iPreparedModel); + if (preparedModel == nullptr) { + LOGE("Prepare model failed, because fail to create preparedModel instance."); + return OH_NN_MEMORY_ERROR; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + std::vector iBuffers; + auto memManager = MemoryManager::GetInstance(); + Memory memory; + OH_NN_ReturnCode ret; + size_t modelCacheSize = modelCache.size(); + for (size_t i = 0; i < modelCacheSize; i++) { + ret = memManager->GetMemory(modelCache[i].buffer, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("The %zuth model cache is invalid. 
Please put valid model cache.", i + 1); + return ret; + } + iBuffers.emplace_back(V1_0::SharedBuffer {memory.fd, memory.length, 0, memory.length}); + } + + V1_0::ModelConfig iModelConfig; + iModelConfig.enableFloat16 = config.enableFloat16; + iModelConfig.mode = NNToHDI::TransPerformanceMode(config.mode); + iModelConfig.priority = NNToHDI::TransPriority(config.priority); + + OHOS::sptr iPreparedModel; + auto hdiRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel); + if (hdiRet != HDF_SUCCESS) { + LOGE("Prepare model from cache failed. ErrorCode=%d", hdiRet); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + preparedModel = CreateSharedPtr(iPreparedModel); + if (preparedModel == nullptr) { + LOGE("Prepare model from model cache failed, because fail to create preparedModel instance."); + return OH_NN_MEMORY_ERROR; + } + return OH_NN_SUCCESS; +} + +void* HDIDevice::AllocateBuffer(size_t length) +{ + if (length == 0) { + LOGE("The length param is invalid, length=0"); + return nullptr; + } + + V1_0::SharedBuffer buffer; + auto ret = m_iDevice->AllocateBuffer(length, buffer); + if (ret != HDF_SUCCESS) { + LOGE("Allocate buffer error. ErrorCode: %d", ret); + return nullptr; + } + + auto memManager = MemoryManager::GetInstance(); + auto addr = memManager->MapMemory(buffer.fd, length); + if (addr == nullptr) { + LOGE("Map fd to address failed."); + } + return addr; +} + +OH_NN_ReturnCode HDIDevice::ReleaseBuffer(const void* buffer) +{ + if (buffer == nullptr) { + LOGE("Buffer is nullptr, no need to release."); + return OH_NN_INVALID_PARAMETER; + } + + auto memManager = MemoryManager::GetInstance(); + Memory memory; + auto ret = memManager->GetMemory(buffer, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("Invalid Buffer, it is not NNRt buffer."); + return ret; + } + + V1_0::SharedBuffer hdiBuffer {memory.fd, memory.length, 0, memory.length}; + auto deviceResult = m_iDevice->ReleaseBuffer(hdiBuffer); + if (deviceResult != HDF_SUCCESS) { + LOGE("Device release buffer error. ErrorCode: %d", deviceResult); + return OH_NN_FAILED; + } + + ret = memManager->UnMapMemory(buffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Unmap memory failed."); + return ret; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::ReleaseSharedBuffer(const V1_0::SharedBuffer& buffer) +{ + if (buffer.fd == INVALID_FD) { + LOGI("No need to release. fd=%d", INVALID_FD); + return OH_NN_SUCCESS; + } + + auto ret = m_iDevice->ReleaseBuffer(buffer); + if (ret != HDF_SUCCESS) { + LOGE("Device release buffer error. ErrorCode=%d", ret); + return OH_NN_FAILED; + } + return OH_NN_SUCCESS; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/hdi_device.h b/frameworks/native/hdi_device.h new file mode 100644 index 0000000000000000000000000000000000000000..ba5253012ecfcda97bc959c3da1e1fd66c983f7e --- /dev/null +++ b/frameworks/native/hdi_device.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_HDI_DEVICE_H +#define NEURAL_NETWORK_RUNTIME_HDI_DEVICE_H + +#include "refbase.h" +#include "hdi_interfaces.h" + +#include "interfaces/oem/cpp_api/device.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +class HDIDevice : public Device { +public: + explicit HDIDevice(OHOS::sptr device); + + OH_NN_ReturnCode GetDeviceName(std::string& name) override; + OH_NN_ReturnCode GetVendorName(std::string& name) override; + OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override; + OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override; + OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, + std::vector& ops) override; + + OH_NN_ReturnCode IsFloat16PrecisionSupported(bool& isSupported) override; + OH_NN_ReturnCode IsPerformanceModeSupported(bool& isSupported) override; + OH_NN_ReturnCode IsPrioritySupported(bool& isSupported) override; + OH_NN_ReturnCode IsDynamicInputSupported(bool& isSupported) override; + OH_NN_ReturnCode IsModelCacheSupported(bool& isSupported) override; + + OH_NN_ReturnCode PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) override; + OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, + std::shared_ptr& preparedModel) override; + + void* AllocateBuffer(size_t length) override; + OH_NN_ReturnCode ReleaseBuffer(const void* buffer) override; + +private: + OH_NN_ReturnCode ReleaseSharedBuffer(const V1_0::SharedBuffer& buffer); + +private: + // first: major version, second: minor version + std::pair m_hdiVersion; + OHOS::sptr m_iDevice {nullptr}; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_H \ No newline at end of file diff --git a/frameworks/native/hdi_interfaces.h b/frameworks/native/hdi_interfaces.h new file mode 100644 index 0000000000000000000000000000000000000000..1d3416ba6f9daff3cd10c3a5ea5bfa2bd02315d4 --- /dev/null +++ b/frameworks/native/hdi_interfaces.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H +#define NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H + +#include +#include +#include + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace V1_0 = OHOS::HDI::Nnrt::V1_0; +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H \ No newline at end of file diff --git a/frameworks/native/hdi_prepared_model.cpp b/frameworks/native/hdi_prepared_model.cpp new file mode 100644 index 0000000000000000000000000000000000000000..491aec696489b34b44c32bad5cdd5a8c93d7c969 --- /dev/null +++ b/frameworks/native/hdi_prepared_model.cpp @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "hdi_prepared_model.h" + +#include "common/log.h" +#include "memory_manager.h" +#include "transform.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +HDIPreparedModel::HDIPreparedModel(OHOS::sptr hdiPreparedModel) + : m_hdiPreparedModel(hdiPreparedModel) +{ + hdiPreparedModel->GetVersion(m_hdiVersion.first, m_hdiVersion.second); +} + +OH_NN_ReturnCode HDIPreparedModel::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + LOGE("The vector of modelCache should be empty. size=%zu", modelCache.size()); + return OH_NN_INVALID_PARAMETER; + } + + std::vector iBuffers; + auto ret = m_hdiPreparedModel->ExportModelCache(iBuffers); + if (ret != HDF_SUCCESS) { + LOGE("Export model cache failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + auto memManager = MemoryManager::GetInstance(); + for (size_t i = 0; i < iBuffers.size(); i++) { + auto addr = memManager->MapMemory(iBuffers[i].fd, iBuffers[i].bufferSize); + if (addr == nullptr) { + LOGE("Export the %zuth model cache failed, cannot not map fd to address.", i + 1); + return OH_NN_MEMORY_ERROR; + } + ModelBuffer modelbuffer {addr, iBuffers[i].bufferSize}; + modelCache.emplace_back(modelbuffer); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIPreparedModel::Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) +{ + V1_0::IOTensor iTensor; + std::vector iInputTensors; + for (auto& input: inputs) { + iTensor = NNToHDI::TransIOTensor(input); + if (iTensor.data.fd == INVALID_FD) { + LOGE("Transform inputs tensor failed, cannot find data file descriptor."); + return OH_NN_INVALID_PARAMETER; + } + iInputTensors.emplace_back(iTensor); + } + + std::vector iOutputTensors; + for (auto& output: outputs) { + iTensor = NNToHDI::TransIOTensor(output); + if (iTensor.data.fd == INVALID_FD) { + LOGE("Transform outputs tensor failed, cannot find data file descriptor."); + return OH_NN_INVALID_PARAMETER; + } + iOutputTensors.emplace_back(iTensor); + } + + auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims, isOutputBufferEnough); + if (ret != HDF_SUCCESS || outputsDims.empty()) { + LOGE("Run model failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + return OH_NN_SUCCESS; +} +} // namespace NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/hdi_prepared_model.h b/frameworks/native/hdi_prepared_model.h new file mode 100644 index 0000000000000000000000000000000000000000..538ab053b2c35a463e1a68ed3fc4309409a98ff1 --- /dev/null +++ b/frameworks/native/hdi_prepared_model.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#ifndef NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H +#define NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H + +#include + +#include "refbase.h" +#include "hdi_interfaces.h" +#include "interfaces/oem/cpp_api/prepared_model.h" +#include "interfaces/oem/cpp_api/cpp_type.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +class HDIPreparedModel : public PreparedModel { +public: + explicit HDIPreparedModel(OHOS::sptr hdiPreparedModel); + + OH_NN_ReturnCode ExportModelCache(std::vector& modelCache) override; + + OH_NN_ReturnCode Run(const std::vector& inputs, + const std::vector& outputs, + std::vector>& outputsDims, + std::vector& isOutputBufferEnough) override; + +private: + // first: major version, second: minor version + std::pair m_hdiVersion; + OHOS::sptr m_hdiPreparedModel {nullptr}; +}; +} // namespace NeuralNetworkRuntime +} // OHOS +#endif // NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H \ No newline at end of file diff --git a/frameworks/native/inner_model.cpp b/frameworks/native/inner_model.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bcd20c6e2cf1feafc2e058ea790dce815363b03e --- /dev/null +++ b/frameworks/native/inner_model.cpp @@ -0,0 +1,546 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "inner_model.h" + +#include +#include +#include + +#include "securec.h" + +#include "common/utils.h" +#include "common/scoped_trace.h" +#include "device_manager.h" +#include "hdi_device.h" +#include "validation.h" +#include "ops_builder.h" +#include "ops_registry.h" +#include "transform.h" + +namespace MSLITE = mindspore::lite; + +namespace OHOS { +namespace NeuralNetworkRuntime { +const std::string NNR_MODEL = "NNR_Model"; +const std::string LOADED_NNR_MODEL = "Loaded_NNR_Model"; + +namespace { +class LiteGraphDeleter { +public: + void operator()(MSLITE::LiteGraph* liteGraph) const + { + MindIR_LiteGraph_Destroy(&liteGraph); + } +}; + +std::shared_ptr ConstructNNTensorFromLiteGraphTensor(const MSLITE::TensorPtr msTensor) +{ + MSLITE::DataType msDataType = MSLITE::MindIR_Tensor_GetDataType(msTensor); + OH_NN_DataType dataType = MSToNN::TransformDataType(msDataType); + std::vector msDims = MSLITE::MindIR_Tensor_GetDims(msTensor); + std::vector msQuantParams = MSLITE::MindIR_Tensor_GetQuantParams(msTensor); + std::vector nnQuantParams = MSToNN::TransformQuantParams(msQuantParams); + + std::shared_ptr nnTensor = CreateSharedPtr(); + if (nnTensor == nullptr) { + LOGE("ConstructNNTensorFromLiteGraphTensor failed, error happened when creating NNTensor."); + return nullptr; + } + + OH_NN_ReturnCode ret = nnTensor->Build(dataType, msDims, nnQuantParams, OH_NN_TENSOR); + if (ret != OH_NN_SUCCESS) { + LOGE("ConstructNNTensorFromLiteGraphTensor failed, error happened when building NNTensor with attributes."); + return nullptr; + } + + return nnTensor; +} + +OH_NN_ReturnCode ConstructNNTensorsFromLiteGraph(const MSLITE::LiteGraph* liteGraph, + const std::vector& indices, + std::vector>& nnTensors) +{ + if (indices.empty()) { + LOGE("ConstructNNTensorsFromLiteGraph failed, passed empty indices list."); + return OH_NN_INVALID_PARAMETER; + } + + uint32_t maximumIndex = *(std::max_element(indices.begin(), indices.end())); + if (maximumIndex >= liteGraph->all_tensors_.size()) { + LOGE("ConstructNNTensorsFromLiteGraph failed, index exceed size of all_tensors inside liteGraph."); + return OH_NN_INVALID_PARAMETER; + } + + std::shared_ptr nnTensor; + for (uint32_t i : indices) { + nnTensor = ConstructNNTensorFromLiteGraphTensor(liteGraph->all_tensors_[i]); + if (nnTensor == nullptr) { + LOGE("ConstructNNTensorsFromLiteGraph failed, failed to construct NNTensor from LiteGraphTensor."); + return OH_NN_NULL_PTR; + } + + nnTensors.emplace_back(nnTensor); + } + + return OH_NN_SUCCESS; +} +} // anonymous namespace + +InnerModel::InnerModel() {} + +bool InnerModel::IsBuild() const +{ + return (m_liteGraph != nullptr); +} + +OH_NN_ReturnCode InnerModel::BuildFromLiteGraph(const MSLITE::LiteGraph* liteGraph) +{ + NNRT_TRACE_NAME("Build model from lite graph"); + if (liteGraph == nullptr) { + LOGE("BuildFromLiteGraph failed, passed empty liteGraph."); + return OH_NN_INVALID_PARAMETER; + } + + if (m_liteGraph != nullptr) { + LOGE("BuildFromLiteGraph failed, liteGraph has been built or loaded before."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (!m_allTensors.empty() || !m_ops.empty()) { + LOGE("BuildFromLiteGraph failed, please LoadLiteGraph without adding tensor and operations."); + return OH_NN_OPERATION_FORBIDDEN; + } + + m_inputTensors.clear(); + OH_NN_ReturnCode ret = ConstructNNTensorsFromLiteGraph(liteGraph, liteGraph->input_indices_, m_inputTensors); + if (ret != OH_NN_SUCCESS) { + LOGE("BuildFromLiteGraph failed, error happened when constructing input NNTensors from liteGraph."); + return 
ret;
+    }
+
+    m_outputTensors.clear();
+    ret = ConstructNNTensorsFromLiteGraph(liteGraph, liteGraph->output_indices_, m_outputTensors);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("BuildFromLiteGraph failed, error happened when constructing output NNTensors from liteGraph.");
+        return ret;
+    }
+
+    m_liteGraph.reset(const_cast(liteGraph), LiteGraphDeleter());
+    m_liteGraph->name_ = LOADED_NNR_MODEL;
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode InnerModel::AddTensor(const OH_NN_Tensor& nnTensor)
+{
+    if (m_liteGraph != nullptr) {
+        LOGE("AddTensor failed, AddTensor is forbidden after Finish() or LoadLiteGraph() has been called.");
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
+
+    std::shared_ptr tensor = CreateSharedPtr();
+    if (tensor == nullptr) {
+        LOGE("AddTensor failed, error happened when creating NNTensor.");
+        return OH_NN_MEMORY_ERROR;
+    }
+
+    OH_NN_ReturnCode ret = tensor->BuildFromOHNNTensor(nnTensor);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("AddTensor failed, error happened when building NNTensor from OH_NN_Tensor.");
+        return ret;
+    }
+
+    // The NNTensor is named "Tensor: " followed by its index in m_allTensors.
+    tensor->SetName("Tensor: " + std::to_string(m_allTensors.size()));
+    m_allTensors.emplace_back(tensor);
+
+    return OH_NN_SUCCESS;
+}
+
+// TODO: reduce the cyclomatic complexity of this function.
+OH_NN_ReturnCode InnerModel::SetTensorValue(uint32_t index, const void* buffer, size_t length)
+{
+    if (m_liteGraph != nullptr) {
+        LOGE("SetTensorValue failed, SetTensorValue is forbidden after Finish() or LoadLiteGraph() has been called.");
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
+
+    if (index >= m_allTensors.size()) {
+        LOGE("SetTensorValue failed, passed index %u exceeds the number of added tensors.", index);
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    const std::shared_ptr tensor = m_allTensors[index];
+    if (tensor->GetBuffer() != nullptr) {
+        LOGE("SetTensorValue failed, the value of this tensor has already been set. Tensor index: %u.", index);
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (buffer == nullptr) {
+        LOGW("SetTensorValue passed a nullptr buffer, which has no effect.");
+        return OH_NN_SUCCESS;
+    }
+
+    if (tensor->IsDynamicShape()) {
+        LOGE("SetTensorValue failed, cannot set value to a tensor with dynamic shape.");
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
+
+    if (length != tensor->GetDataLength()) {
+        LOGE("SetTensorValue failed, passed buffer length %zu differs from the byte size of the tensor %zu.",
+             length, tensor->GetDataLength());
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    // Data will be released inside NNTensor once it has been handed over via SetBuffer().
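// A minimal caller-side sketch of how this function is reached through the public C API defined
// later in this patch (illustrative only: the tensor descriptors, the index values, the
// OH_NN_OPS_ADD enumerator name and the int8 activation value 0 are assumptions here, and error
// handling is omitted):
//
//     OH_NNModel* model = OH_NNModel_Construct();
//     OH_NNModel_AddTensor(model, &input0);           // index 0
//     OH_NNModel_AddTensor(model, &input1);           // index 1
//     OH_NNModel_AddTensor(model, &activationParam);  // index 2, an OH_NN_ADD_ACTIVATIONTYPE scalar
//     OH_NNModel_AddTensor(model, &output0);          // index 3
//     int8_t act = 0;                                 // assumed to mean "no fused activation"
//     OH_NNModel_SetTensorData(model, 2, &act, sizeof(act));   // ends up in SetTensorValue() here
//     OH_NNModel_AddOperation(model, OH_NN_OPS_ADD, &paramIndices, &inputIndices, &outputIndices);
//     OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices);
//     OH_NNModel_Finish(model);                       // maps to InnerModel::Build()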
+    void* data = new (std::nothrow) char[length];
+    if (data == nullptr) {
+        LOGE("SetTensorValue failed, please check whether the system is out of memory.");
+        return OH_NN_MEMORY_ERROR;
+    }
+
+    errno_t ret = memcpy_s(data, length, buffer, length);
+    if (ret != EOK) {
+        LOGE("SetTensorValue failed, please check the error number %d returned by memcpy_s.", ret);
+        delete [] reinterpret_cast(data);
+        return OH_NN_FAILED;
+    }
+
+    tensor->SetBuffer(data, length);
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode InnerModel::ValidateInputAndOutput(
+    const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices) const
+{
+    OH_NN_ReturnCode ret = ValidateTensorArray(inputIndices);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("ValidateInputAndOutput failed, please check input indices.");
+        return ret;
+    }
+
+    ret = ValidateTensorArray(outputIndices);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("ValidateInputAndOutput failed, please check output indices.");
+        return ret;
+    }
+
+    if (inputIndices.size == 0) {
+        LOGE("ValidateInputAndOutput failed, passed empty input indices.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (outputIndices.size == 0) {
+        LOGE("ValidateInputAndOutput failed, passed empty output indices.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    std::shared_ptr tensor{nullptr};
+    for (uint32_t i = 0; i < inputIndices.size; i++) {
+        tensor = m_allTensors[inputIndices.data[i]];
+        if (tensor->GetType() != OH_NN_TENSOR) {
+            LOGE("ValidateInputAndOutput failed, a tensor set as input should have type OH_NN_TENSOR, but received %d. "
+                 "Tensor index: %u.", tensor->GetType(), i);
+            return OH_NN_INVALID_PARAMETER;
+        }
+    }
+
+    for (uint32_t i = 0; i < outputIndices.size; i++) {
+        tensor = m_allTensors[outputIndices.data[i]];
+        if (tensor->GetType() != OH_NN_TENSOR) {
+            LOGE("ValidateInputAndOutput failed, a tensor set as output should have type OH_NN_TENSOR, but received %d. "
+                 "Tensor index: %u.", tensor->GetType(), i);
+            return OH_NN_INVALID_PARAMETER;
+        }
+    }
+
+    // The numbers of inputIndices and outputIndices are usually small, so O(n^2) iteration is fine.
+    for (uint32_t i = 0; i < inputIndices.size; i++) {
+        for (uint32_t j = 0; j < outputIndices.size; j++) {
+            if (inputIndices.data[i] == outputIndices.data[j]) {
+                LOGE("ValidateInputAndOutput failed, a tensor should not be set as both input and output, "
+                     "input index %u, output index %u", inputIndices.data[i], outputIndices.data[j]);
+                return OH_NN_INVALID_PARAMETER;
+            }
+        }
+    }
+    return OH_NN_SUCCESS;
+}
+
+/* Check whether the indices exceed the number of added tensors.
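   It is shared by ValidateInputAndOutput() for the input and output indices and reused by
   AddOperation() for paramIndices before an operation is built.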
*/ +OH_NN_ReturnCode InnerModel::ValidateTensorArray(const OH_NN_UInt32Array& indices) const +{ + OH_NN_ReturnCode ret = Validation::ValidateArray(indices.data, indices.size); + if (ret != OH_NN_SUCCESS) { + LOGE("ValidateTensorArray failed, please check the validity of indices."); + return ret; + } + + for (uint32_t i = 0; i < indices.size; i++) { + if (indices.data[i] >= m_allTensors.size()) { + LOGE("ValidateTensors failed, index %u is out of the number of added tensors.", indices.data[i]); + return OH_NN_INVALID_PARAMETER; + } + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode InnerModel::AddOperation(OH_NN_OperationType opType, const OH_NN_UInt32Array& paramIndices, + const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices) +{ + if (m_liteGraph != nullptr) { + LOGE("AddOperation failed, AddOperation is forbidden after after Finish() or LoadLiteGraph() has been called."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode ret = ValidateInputAndOutput(inputIndices, outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("AddOperation failed, please check inputIndices and outputIndices."); + return ret; + } + std::vector inputs = ConstructVectorFromArray(inputIndices.data, inputIndices.size); + std::vector outputs = ConstructVectorFromArray(outputIndices.data, outputIndices.size); + + ret = ValidateTensorArray(paramIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("AddOperation failed, please check paramIndices."); + return ret; + } + std::vector parameters = ConstructVectorFromArray(paramIndices.data, paramIndices.size); + + Ops::OpsRegistry& opsRegistry = Ops::OpsRegistry::GetSingleton(); + std::unique_ptr opsBuilder = opsRegistry.GetOpsBuilder(opType); + if (opsBuilder == nullptr) { + LOGE("AddOperation failed, cannot add operation of type: %d.", opType); + return OH_NN_INVALID_PARAMETER; + } + + ret = opsBuilder->Build(parameters, inputs, outputs, m_allTensors); + if (ret != OH_NN_SUCCESS) { + LOGE("AddOperation failed, error happens when build operations."); + return ret; + } + + m_ops.emplace_back(std::move(opsBuilder)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode InnerModel::SpecifyInputsAndOutputs( + const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices) +{ + if (m_liteGraph != nullptr) { + LOGE("SpecifyInputsAndOutputs failed, " + "SpecifyInputsAndOutputs is forbidden after Finish() or LoadLiteGraph() has been called."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (!m_inputTensors.empty()) { + LOGE("SpecifyInputsAndOutputs failed, SpecifyInputsAndOutputs should not be called twice."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode ret = ValidateInputAndOutput(inputIndices, outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("SpecifyInputsAndOutputs failed, please check inputIndices and outputIndices."); + return ret; + } + + m_inputIndices = ConstructVectorFromArray(inputIndices.data, inputIndices.size); + m_outputIndices = ConstructVectorFromArray(outputIndices.data, outputIndices.size); + + for (uint32_t i : m_inputIndices) { + m_inputTensors.emplace_back(m_allTensors[i]); + } + + for (uint32_t i : m_outputIndices) { + m_outputTensors.emplace_back(m_allTensors[i]); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode InnerModel::Build() +{ + NNRT_TRACE_NAME("Build model"); + if (m_liteGraph != nullptr) { + LOGE("Build failed," + " OH_NNModel is not allowed to build again after Build() or BuildFromLiteGraph() has been called."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (m_allTensors.empty()) { + 
LOGE("Build failed, no OH_NN_Tensor has been added. Must call AddTensor before Build()."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (m_ops.empty()) { + LOGE("Build failed, no operation has beed added. Must call AddOperation before Build()."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if ((m_inputIndices.empty()) || (m_outputIndices.empty())) { + LOGE("Build failed, inputs and outputs are unspecified. Must call SpecifyInputsAndOutputs before Build()."); + return OH_NN_OPERATION_FORBIDDEN; + } + + MSLITE::LiteGraph* pLiteGraph = new (std::nothrow) MSLITE::LiteGraph(); + if (pLiteGraph == nullptr) { + LOGE("Build failed, error happend when creating LiteGraph."); + return OH_NN_MEMORY_ERROR; + } + m_liteGraph.reset(pLiteGraph, LiteGraphDeleter()); + + m_liteGraph->name_ = NNR_MODEL; + + std::unordered_map modelIDToGraphID; + AddTensorsToLiteGraph(modelIDToGraphID); + + OH_NN_ReturnCode ret = AddNodesToLiteGraph(modelIDToGraphID); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + // subGraph will be released by LiteGraph if it is added into instance of LiteGraph. + MSLITE::LiteGraph::SubGraph* subGraph = new (std::nothrow) MSLITE::LiteGraph::SubGraph(); + if (subGraph == nullptr) { + LOGE("AddNodesToLiteGraph failed, error happened when creating subgraph."); + return OH_NN_NULL_PTR; + } + + subGraph->name_ = "NNRt_SubGraph"; // Name of subGraph + subGraph->input_indices_ = m_liteGraph->input_indices_; + subGraph->output_indices_ = m_liteGraph->output_indices_; + uint32_t nodeCount = static_cast(m_ops.size()); // m_ops.size() smaller than UINT32_MAX + for (uint32_t i = 0; i < nodeCount; i++) { + subGraph->node_indices_.emplace_back(i); + } + m_liteGraph->sub_graphs_.emplace_back(subGraph); + + return OH_NN_SUCCESS; +} + +void InnerModel::AddTensorsToLiteGraph(std::unordered_map& modelIDToGraphID) +{ + uint32_t graphID = 0; + LiteGraphTensorPtr tensor(nullptr, DestroyLiteGraphTensor); + size_t tensorCount = m_allTensors.size(); + for (size_t i = 0; i < tensorCount; i++) { + const std::shared_ptr& nnTensor = m_allTensors[i]; + // If the tensor is used as operation parameter, it will not convert to the tensor of LiteGraph. + if (nnTensor->IsOpParameter()) { + continue; + } + + tensor = nnTensor->ConvertToLiteGraphTensor(); + m_liteGraph->all_tensors_.emplace_back(tensor.release()); + modelIDToGraphID[i] = graphID++; + } + + // Note: Indices in m_inputIndices and m_outputIndices have been checked in SpecifyInputAndOutput(), there is no + // need to check twice. + std::vector& inputIndices = m_liteGraph->input_indices_; + for (uint32_t index : m_inputIndices) { + inputIndices.emplace_back(modelIDToGraphID.at(index)); + } + + std::vector& outputIndices = m_liteGraph->output_indices_; + for (uint32_t index : m_outputIndices) { + outputIndices.emplace_back(modelIDToGraphID.at(index)); + } +} + +OH_NN_ReturnCode InnerModel::AddNodesToLiteGraph(const std::unordered_map& modelIDToGraphID) +{ + MSLITE::LiteGraph::Node* node{nullptr}; + size_t opCount = m_ops.size(); + Ops::LiteGraphPrimitvePtr primitive = {nullptr, DestroyLiteGraphTensor}; + for (size_t i = 0; i < opCount; i++) { + std::unique_ptr& op = m_ops[i]; + // node will be released by LiteGraph if it is added into instance of LiteGraph. 
+ node = new(std::nothrow) MSLITE::LiteGraph::Node(); + if (node == nullptr) { + LOGE("AddNodesToLiteGraph failed, error happened when creating LiteGraph tensor."); + return OH_NN_NULL_PTR; + } + + node->name_ = op->GetName() + ":" + std::to_string(i); + node->quant_type_ = NNToMS::TransformQuantType(op->GetQuantType()); + + op->GetInputIndex(node->input_indices_, modelIDToGraphID); + op->GetOutputIndex(node->output_indices_, modelIDToGraphID); + + primitive = op->GetPrimitive(); + if (primitive == nullptr) { + LOGE("Build %s primitive failed.", op->GetName().c_str()); + delete node; + return OH_NN_FAILED; + } + + node->primitive_ = primitive.release(); + m_liteGraph->all_nodes_.emplace_back(node); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode InnerModel::GetSupportedOperations(size_t deviceID, const bool** isSupported, uint32_t& opCount) +{ + if (m_liteGraph == nullptr) { + LOGE("GetSupportedOperations failed. GetSupportedOperations() must be called after Finish()."); + return OH_NN_OPERATION_FORBIDDEN; + } + + DeviceManager& deviceManager = DeviceManager::GetInstance(); + + std::shared_ptr device = deviceManager.GetDevice(deviceID); + if (device == nullptr) { + LOGE("GetSupportedOperations failed, retrieve device failed."); + return OH_NN_FAILED; + } + + std::vector supportedOperations; + OH_NN_ReturnCode ret = device->GetSupportedOperation(m_liteGraph, supportedOperations); + if (ret != OH_NN_SUCCESS) { + LOGE("GetSupportedOperations failed, error happened when get supported operations from devices."); + return ret; + } + + m_supportedOperations.clear(); + std::copy(supportedOperations.begin(), supportedOperations.end(), std::back_inserter(m_supportedOperations)); + + *isSupported = reinterpret_cast(m_supportedOperations.data()); + opCount = m_supportedOperations.size(); + + return OH_NN_SUCCESS; +} + +std::shared_ptr InnerModel::GetLiteGraphs() const +{ + return m_liteGraph; +} + +std::vector> InnerModel::GetInputTensors() const +{ + return m_inputTensors; +} + +std::vector> InnerModel::GetOutputTensors() const +{ + return m_outputTensors; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/inner_model.h b/frameworks/native/inner_model.h new file mode 100644 index 0000000000000000000000000000000000000000..6a4460d79677d41b68a55289d5e5a217db100ad5 --- /dev/null +++ b/frameworks/native/inner_model.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_INNER_MODEL_H +#define NEURAL_NETWORK_RUNTIME_INNER_MODEL_H + +#include +#include + +#include "mindir.h" +#include "ops_builder.h" +#include "interfaces/kits/c/neural_network_runtime.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +class InnerModel { +public: + InnerModel(); + + bool IsBuild() const; + OH_NN_ReturnCode BuildFromLiteGraph(const mindspore::lite::LiteGraph* liteGraph); + OH_NN_ReturnCode AddTensor(const OH_NN_Tensor& nnTensor); + OH_NN_ReturnCode SetTensorValue(uint32_t index, const void* buffer, size_t length); + OH_NN_ReturnCode AddOperation(OH_NN_OperationType opType, + const OH_NN_UInt32Array& paramIndices, + const OH_NN_UInt32Array& inputIndices, + const OH_NN_UInt32Array& outputIndices); + OH_NN_ReturnCode GetSupportedOperations(size_t deviceID, const bool** isSupported, uint32_t& opCount); + OH_NN_ReturnCode SpecifyInputsAndOutputs( + const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices); + OH_NN_ReturnCode Build(); + std::vector> GetInputTensors() const; + std::vector> GetOutputTensors() const; + std::shared_ptr GetLiteGraphs() const; + +private: + void AddTensorsToLiteGraph(std::unordered_map& modelIDToGraphID); + OH_NN_ReturnCode AddNodesToLiteGraph(const std::unordered_map& modelIDToGraphID); + OH_NN_ReturnCode ValidateInputAndOutput( + const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices) const; + OH_NN_ReturnCode ValidateTensorArray(const OH_NN_UInt32Array& indices) const; + +private: + std::vector m_supportedOperations; // std::vector not support data(), use std::vector instead. + std::vector m_inputIndices; + std::vector m_outputIndices; + std::vector> m_ops; + std::vector> m_allTensors; + std::vector> m_inputTensors; // Used to pass input tensors to compilation. + std::vector> m_outputTensors; // Used to pass output tensors to compilation. + std::shared_ptr m_liteGraph {nullptr}; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_INNER_MODEL_H diff --git a/frameworks/native/memory_manager.cpp b/frameworks/native/memory_manager.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2c87adaeef0f8bd417a5076a0da488bd7841cef9 --- /dev/null +++ b/frameworks/native/memory_manager.cpp @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#include "memory_manager.h" + +#include +#include + +#include "interfaces/oem/cpp_api/cpp_type.h" +#include "common/log.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +void* MemoryManager::MapMemory(int fd, size_t length) +{ + if (fd < 0) { + LOGE("Invalid fd, fd must greater than 0."); + return nullptr; + } + + if (length <= 0 || length > ALLOCATE_BUFFER_LIMIT) { + LOGE("Invalid buffer size, it must greater than 0 and less than 1Gb. 
length=%zu", length); + return nullptr; + } + + void* addr = mmap(nullptr, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + if (addr == MAP_FAILED) { + LOGE("Map fd to address failed."); + return nullptr; + } + + std::lock_guard lock(m_mtx); + Memory memory {fd, addr, length}; + m_memorys.emplace(addr, memory); + return addr; +} + +OH_NN_ReturnCode MemoryManager::UnMapMemory(const void* buffer) +{ + if (buffer == nullptr) { + LOGE("Buffer is nullptr, no need to release."); + return OH_NN_INVALID_PARAMETER; + } + + auto iter = m_memorys.find(buffer); + if (iter == m_memorys.end()) { + LOGE("This buffer is not found, cannot release."); + return OH_NN_INVALID_PARAMETER; + } + + auto& memory = m_memorys[buffer]; + auto unmapResult = munmap(const_cast(memory.data), memory.length); + if (unmapResult != 0) { + LOGE("Unmap memory failed. Please try again."); + return OH_NN_MEMORY_ERROR; + } + memory.data = nullptr; + + if (close(memory.fd) != 0) { + LOGE("Close memory fd failed. fd=%d", memory.fd); + return OH_NN_MEMORY_ERROR; + } + + std::lock_guard lock(m_mtx); + m_memorys.erase(iter); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode MemoryManager::GetMemory(const void* buffer, Memory& memory) const +{ + if (buffer == nullptr) { + LOGE("Memory is nullptr."); + return OH_NN_NULL_PTR; + } + + auto iter = m_memorys.find(buffer); + if (iter == m_memorys.end()) { + LOGE("Memory is not found."); + return OH_NN_INVALID_PARAMETER; + } + + memory.fd = iter->second.fd; + memory.data = buffer; + memory.length = iter->second.length; + + return OH_NN_SUCCESS; +} +} // NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/memory_manager.h b/frameworks/native/memory_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..5518deb97dd870a3fa6cc66ea4fd9649b7837727 --- /dev/null +++ b/frameworks/native/memory_manager.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_MEMORY_MANAGER_H +#define NEURAL_NETWORK_RUNTIME_MEMORY_MANAGER_H + +#include +#include + +#include "interfaces/kits/c/neural_network_runtime_type.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +const int INVALID_FD = -1; + +struct Memory { + int fd; + const void* data; + size_t length; +}; + +class MemoryManager { +public: + ~MemoryManager() = default; + + void* MapMemory(int fd, size_t length); + OH_NN_ReturnCode UnMapMemory(const void* buffer); + OH_NN_ReturnCode GetMemory(const void* buffer, Memory& memory) const; + + static MemoryManager* GetInstance() + { + static MemoryManager instance; + return &instance; + } + +private: + MemoryManager() {}; + MemoryManager(const MemoryManager&) = delete; + MemoryManager& operator=(const MemoryManager&) = delete; + +private: + // key: OH_NN_Memory, value: fd + std::unordered_map m_memorys; + std::mutex m_mtx; +}; +} // namespace NeuralNetworkRuntime +} // OHOS +#endif // NEURAL_NETWORK_RUNTIME_MEMORY_MANAGER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime.cpp b/frameworks/native/neural_network_runtime.cpp new file mode 100644 index 0000000000000000000000000000000000000000..dfd5f366f1171ee3abdaf123efd7b49e5e468ede --- /dev/null +++ b/frameworks/native/neural_network_runtime.cpp @@ -0,0 +1,682 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "interfaces/innerkits/c/neural_network_runtime_inner.h" +#include "interfaces/kits/c/neural_network_runtime.h" + +#include "compilation.h" +#include "device_manager.h" +#include "executor.h" +#include "inner_model.h" +#include "common/log.h" + + +using namespace OHOS::NeuralNetworkRuntime; + +#define NNRT_API __attribute__((visibility("default"))) + +NNRT_API OH_NNModel *OH_NNModel_Construct(void) +{ + InnerModel *innerModel = new(std::nothrow) InnerModel(); + if (innerModel == nullptr) { + LOGE("OH_NNModel_Construct failed, please check whether it has enough memory."); + return nullptr; + } + + OH_NNModel *nnModel = reinterpret_cast(innerModel); + return nnModel; +} + +NNRT_API OH_NN_ReturnCode OH_NNModel_AddTensor(OH_NNModel *model, const OH_NN_Tensor *tensor) +{ + if (model == nullptr) { + LOGE("OH_NNModel_AddTensor failed, passed nullptr to model."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor == nullptr) { + LOGE("OH_NNModel_AddTensor failed, passed nullptr to tensor."); + return OH_NN_INVALID_PARAMETER; + } + + InnerModel *innerModel = reinterpret_cast(model); + return innerModel->AddTensor(*tensor); +} + +NNRT_API OH_NN_ReturnCode OH_NNModel_AddOperation(OH_NNModel *model, + OH_NN_OperationType op, + const OH_NN_UInt32Array *paramIndices, + const OH_NN_UInt32Array *inputIndices, + const OH_NN_UInt32Array *outputIndices) +{ + if (model == nullptr) { + LOGE("OH_NNModel_AddOperation failed, passed nullptr to model."); + return OH_NN_INVALID_PARAMETER; + } + + if (paramIndices == nullptr) { + LOGE("OH_NNModel_AddOperation failed, passed nullptr to paramIndices."); + return OH_NN_INVALID_PARAMETER; + } + + if (inputIndices == nullptr) { + LOGE("OH_NNModel_AddOperation failed, passed nullptr to inputIndices."); + return OH_NN_INVALID_PARAMETER; + } + + if (outputIndices == nullptr) { + LOGE("OH_NNModel_AddOperation failed, passed nullptr to outputIndices."); + return OH_NN_INVALID_PARAMETER; + } + + InnerModel *innerModel = reinterpret_cast(model); + return innerModel->AddOperation(op, *paramIndices, *inputIndices, *outputIndices); +} + +NNRT_API OH_NN_ReturnCode OH_NNModel_SetTensorData(OH_NNModel *model, + uint32_t index, + const void *dataBuffer, + size_t length) +{ + if (model == nullptr) { + LOGE("OH_NNModel_SetTensorData failed, passed nullptr to model."); + return OH_NN_INVALID_PARAMETER; + } + + if (dataBuffer == nullptr) { + LOGE("OH_NNModel_SetTensorData failed, passed nullptr to dataBuffer, which has no effect."); + return OH_NN_INVALID_PARAMETER; + } + + if (length == 0) { + LOGE("OH_NNModel_SetTensorData failed, passed dataBuffer with length 0, which has no effect."); + return OH_NN_INVALID_PARAMETER; + } + + InnerModel *innerModel = reinterpret_cast(model); + return innerModel->SetTensorValue(index, dataBuffer, length); +} + +NNRT_API OH_NN_ReturnCode OH_NNModel_SpecifyInputsAndOutputs(OH_NNModel *model, + const OH_NN_UInt32Array *inputIndices, + const OH_NN_UInt32Array *outputIndices) +{ + if (model == nullptr) { + LOGE("OH_NNModel_SpecifyInputsAndOutputs failed, passed nullptr to model."); + return OH_NN_INVALID_PARAMETER; + } + + if (inputIndices == nullptr) { + LOGE("OH_NNModel_SpecifyInputsAndOutputs failed, passed nullptr to inputIndices."); + return OH_NN_INVALID_PARAMETER; + } + + if (outputIndices == nullptr) { + LOGE("OH_NNModel_SpecifyInputsAndOutputs failed, passed nullptr to outputIndices."); + return OH_NN_INVALID_PARAMETER; + } + + InnerModel *innerModel = reinterpret_cast(model); + return 
innerModel->SpecifyInputsAndOutputs(*inputIndices, *outputIndices); +} + +NNRT_API OH_NN_ReturnCode OH_NNModel_Finish(OH_NNModel *model) +{ + if (model == nullptr) { + LOGE("OH_NNModel_Finish failed, passed nullptr to model."); + return OH_NN_INVALID_PARAMETER; + } + + InnerModel *innerModel = reinterpret_cast(model); + return innerModel->Build(); +} + +NNRT_API OH_NN_ReturnCode OH_NNModel_BuildFromLiteGraph(OH_NNModel *model, const void *liteGraph) +{ + if (model == nullptr) { + LOGE("OH_NNModel_BuildFromLiteGraph failed, passed nullptr to model."); + return OH_NN_INVALID_PARAMETER; + } + + if (liteGraph == nullptr) { + LOGE("OH_NNModel_BuildFromLiteGraph failed, passed nullptr to liteGraph."); + return OH_NN_INVALID_PARAMETER; + } + + auto *pLiteGraph = static_cast(liteGraph); + InnerModel *innerModel = reinterpret_cast(model); + + // Once the innerModel built from the liteGraph successfully, the innerModel + // owns the liteGraph, in which case, the invoker should not delete + // the liteGraph actively. Otherwise, the invoker still has the ownership. + return innerModel->BuildFromLiteGraph(pLiteGraph); +} + +NNRT_API void OH_NNModel_Destroy(OH_NNModel **model) +{ + if (model == nullptr) { + LOGW("OH_NNModel_Destroy has no effect, passed nullptr to model."); + return; + } + + if (*model == nullptr) { + LOGW("OH_NNModel_Destroy has no effect, passed nullptr to *model."); + return; + } + + InnerModel *innerModel = reinterpret_cast(*model); + delete innerModel; + *model = nullptr; +} + +NNRT_API OH_NN_ReturnCode OH_NNModel_GetAvailableOperations(OH_NNModel *model, + size_t deviceID, + const bool **isAvailable, + uint32_t *opCount) +{ + if (model == nullptr) { + LOGE("OH_NNModel_GetAvailableOperations failed, passed nullptr to model."); + return OH_NN_INVALID_PARAMETER; + } + + if (isAvailable == nullptr) { + LOGE("OH_NNModel_GetAvailableOperations failed, passed nullptr to isAvailable."); + return OH_NN_INVALID_PARAMETER; + } + + if (*isAvailable != nullptr) { + LOGE("OH_NNModel_GetAvailableOperations failed, *isAvailable is not nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + if (opCount == nullptr) { + LOGE("OH_NNModel_GetAvailableOperations failed, passed nullptr to opCount."); + return OH_NN_INVALID_PARAMETER; + } + + InnerModel *innerModel = reinterpret_cast(model); + return innerModel->GetSupportedOperations(deviceID, isAvailable, *opCount); +} + +NNRT_API OH_NNCompilation *OH_NNCompilation_Construct(const OH_NNModel *model) +{ + if (model == nullptr) { + LOGE("OH_NNCompilation_Construct failed, passed nullptr to model."); + return nullptr; + } + const InnerModel *innerModel = reinterpret_cast(model); + + if (!innerModel->IsBuild()) { + LOGE("OH_NNCompilation_Construct failed, should call OH_NNModel_Finish before creating compilation."); + return nullptr; + } + + Compilation *compilation = new(std::nothrow) Compilation(innerModel); + if (compilation == nullptr) { + LOGE("OH_NNCompilation_Construct failed, please check whether it has enough memory."); + return nullptr; + } + + OH_NNCompilation* nnCompilation = reinterpret_cast(compilation); + return nnCompilation; +} + +NNRT_API OH_NN_ReturnCode OH_NNCompilation_SetDevice(OH_NNCompilation *compilation, size_t deviceID) +{ + if (compilation == nullptr) { + LOGE("OH_NNCompilation_SetDevice failed, passed nullptr to compilation."); + return OH_NN_INVALID_PARAMETER; + } + + Compilation* innerCompilation = reinterpret_cast(compilation); + return innerCompilation->SetDevice(deviceID); +} + +NNRT_API OH_NN_ReturnCode 
OH_NNCompilation_SetCache(OH_NNCompilation *compilation, + const char *cachePath, + uint32_t version) +{ + if (compilation == nullptr) { + LOGE("OH_NNCompilation_SetCache failed, passed nullptr to compilation."); + return OH_NN_INVALID_PARAMETER; + } + + if (cachePath == nullptr) { + LOGE("OH_NNCompilation_SetCache failed, passed nullptr to cachePath."); + return OH_NN_INVALID_PARAMETER; + } + + Compilation* innerCompilation = reinterpret_cast(compilation); + return innerCompilation->SetCacheDir(cachePath, version); +} + +NNRT_API OH_NN_ReturnCode OH_NNCompilation_SetPerformanceMode(OH_NNCompilation *compilation, + OH_NN_PerformanceMode performanceMode) +{ + if (compilation == nullptr) { + LOGE("OH_NNCompilation_SetPerformanceMode failed, passed nullptr to compilation."); + return OH_NN_INVALID_PARAMETER; + } + + Compilation* innerCompilation = reinterpret_cast(compilation); + return innerCompilation->SetPerformance(performanceMode); +} + +NNRT_API OH_NN_ReturnCode OH_NNCompilation_SetPriority(OH_NNCompilation *compilation, + OH_NN_Priority priority) +{ + if (compilation == nullptr) { + LOGE("OH_NNCompilation_SetPriority failed, passed nullptr to compilation."); + return OH_NN_INVALID_PARAMETER; + } + + Compilation* innerCompilation = reinterpret_cast(compilation); + return innerCompilation->SetPriority(priority); +} + +NNRT_API OH_NN_ReturnCode OH_NNCompilation_EnableFloat16(OH_NNCompilation *compilation, bool enableFloat16) +{ + if (compilation == nullptr) { + LOGE("OH_NNCompilation_EnableFloat16 failed, passed nullptr to compilation."); + return OH_NN_INVALID_PARAMETER; + } + + Compilation* innerCompilation = reinterpret_cast(compilation); + return innerCompilation->SetEnableFp16(enableFloat16); +} + +NNRT_API OH_NN_ReturnCode OH_NNCompilation_Build(OH_NNCompilation *compilation) +{ + if (compilation == nullptr) { + LOGE("OH_NNCompilation_Build failed, passed nullptr to compilation."); + return OH_NN_INVALID_PARAMETER; + } + + Compilation* innerCompilation = reinterpret_cast(compilation); + return innerCompilation->Build(); +} + +NNRT_API void OH_NNCompilation_Destroy(OH_NNCompilation **compilation) +{ + if (compilation == nullptr) { + LOGW("OH_NNCompilation_Destroy has no effect, passed nullptr to compilation."); + return; + } + + if (*compilation == nullptr) { + LOGW("OH_NNCompilation_Destroy has no effect, passed nullptr to *compilation."); + return; + } + + Compilation *innerCompilation = reinterpret_cast(*compilation); + delete innerCompilation; + *compilation = nullptr; +} + +NNRT_API OH_NNExecutor *OH_NNExecutor_Construct(OH_NNCompilation *compilation) +{ + if (compilation == nullptr) { + LOGE("OH_NNExecutor_Construct failed, passed nullptr to compilation."); + return nullptr; + } + Compilation *innerCompilation = reinterpret_cast(compilation); + + if (!innerCompilation->IsBuild()) { + LOGE("OH_NNExecutor_Construct failed, should call OH_NNCompilation_Build before creating executor."); + return nullptr; + } + + Executor* executor = new(std::nothrow) Executor(innerCompilation); + if (executor == nullptr) { + LOGE("OH_NNExecutor_Construct failed, please check whether it has enough memory."); + return nullptr; + } + + OH_NNExecutor* nnExecutor = reinterpret_cast(executor); + return nnExecutor; +} + +NNRT_API OH_NN_ReturnCode OH_NNExecutor_SetInput(OH_NNExecutor *executor, + uint32_t inputIndex, + const OH_NN_Tensor *tensor, + const void *dataBuffer, + size_t length) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_SetInput failed, passed nullptr to executor."); + return 
OH_NN_INVALID_PARAMETER; + } + + if (tensor == nullptr) { + LOGE("OH_NNExecutor_SetInput failed, passed nullptr to tensor."); + return OH_NN_INVALID_PARAMETER; + } + + if (dataBuffer == nullptr) { + LOGE("OH_NNExecutor_SetInput failed, passed nullptr to dataBuffer."); + return OH_NN_INVALID_PARAMETER; + } + + if (length == 0) { + LOGE("OH_NNExecutor_SetInput failed, dataBuffer length is 0."); + return OH_NN_INVALID_PARAMETER; + } + + Executor* innerExecutor = reinterpret_cast(executor); + return innerExecutor->SetInput(inputIndex, *tensor, dataBuffer, length); +} + +NNRT_API OH_NN_ReturnCode OH_NNExecutor_SetOutput(OH_NNExecutor *executor, + uint32_t outputIndex, + void *dataBuffer, + size_t length) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_SetOutput failed, passed nullptr to executor."); + return OH_NN_INVALID_PARAMETER; + } + + if (dataBuffer == nullptr) { + LOGE("OH_NNExecutor_SetOutput failed, passed nullptr to dataBuffer."); + return OH_NN_INVALID_PARAMETER; + } + + if (length == 0) { + LOGE("OH_NNExecutor_SetOutput failed, dataBuffer length is 0."); + return OH_NN_INVALID_PARAMETER; + } + + Executor* innerExecutor = reinterpret_cast(executor); + return innerExecutor->SetOutput(outputIndex, dataBuffer, length); +} + +NNRT_API OH_NN_ReturnCode OH_NNExecutor_GetOutputShape(OH_NNExecutor *executor, + uint32_t outputIndex, + int32_t **shape, + uint32_t *shapeLength) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_GetOutputShape failed, passed nullptr to executor."); + return OH_NN_INVALID_PARAMETER; + } + + if (shape == nullptr) { + LOGE("OH_NNExecutor_GetOutputShape failed, passed nullptr to shape."); + return OH_NN_INVALID_PARAMETER; + } + + if (*shape != nullptr) { + LOGE("OH_NNExecutor_GetOutputShape failed, *shape is not nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + if (shapeLength == nullptr) { + LOGE("OH_NNExecutor_GetOutputShape failed, passed nullptr to shapeLength."); + return OH_NN_INVALID_PARAMETER; + } + + Executor* innerExecutor = reinterpret_cast(executor); + return innerExecutor->GetOutputShape(outputIndex, shape, *shapeLength); +} + +NNRT_API OH_NN_ReturnCode OH_NNExecutor_Run(OH_NNExecutor *executor) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_Run failed, passed nullptr to executor."); + return OH_NN_INVALID_PARAMETER; + } + + Executor *innerExecutor = reinterpret_cast(executor); + return innerExecutor->Run(); +} + +NNRT_API OH_NN_Memory *OH_NNExecutor_AllocateInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, size_t length) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_AllocateInputMemory failed, passed nullptr to executor."); + return nullptr; + } + + if (length == 0) { + LOGW("OH_NNExecutor_AllocateInputMemory has no effect, passed length equals 0."); + return nullptr; + } + + OH_NN_Memory *nnMemory = nullptr; + Executor *innerExecutor = reinterpret_cast(executor); + OH_NN_ReturnCode ret = innerExecutor->CreateInputMemory(inputIndex, length, &nnMemory); + if (ret != OH_NN_SUCCESS) { + LOGE("OH_NNExecutor_AllocateInputMemory failed, error happened when creating input memory in executor."); + return nullptr; + } + + return nnMemory; +} + +NNRT_API OH_NN_Memory *OH_NNExecutor_AllocateOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, size_t length) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_AllocateOutputMemory failed, passed nullptr to executor."); + return nullptr; + } + + if (length == 0) { + LOGW("OH_NNExecutor_AllocateOutputMemory has no effect, passed length equals 0."); + return nullptr; + } + 
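// A typical zero-copy output flow built on top of this call (sketch only; it assumes OH_NN_Memory
// exposes a data pointer and that outputByteSize matches the byte size of output tensor 0):
//
//     OH_NN_Memory* outMem = OH_NNExecutor_AllocateOutputMemory(executor, 0, outputByteSize);
//     OH_NNExecutor_SetOutputWithMemory(executor, 0, outMem);
//     OH_NNExecutor_Run(executor);
//     const float* result = static_cast<const float*>(outMem->data);   // read the result in place
//     OH_NNExecutor_DestroyOutputMemory(executor, 0, &outMem);         // releases the mapping, nulls outMem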
+ OH_NN_Memory *nnMemory = nullptr; + Executor *innerExecutor = reinterpret_cast(executor); + OH_NN_ReturnCode ret = innerExecutor->CreateOutputMemory(outputIndex, length, &nnMemory); + if (ret != OH_NN_SUCCESS) { + LOGE("OH_NNExecutor_AllocateOutputMemory failed, error happened when creating output memory in executor."); + return nullptr; + } + + return nnMemory; +} + +NNRT_API void OH_NNExecutor_DestroyInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, OH_NN_Memory **memory) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_DestroyInputMemory failed, passed nullptr to executor."); + return; + } + + if (memory == nullptr) { + LOGW("OH_NNExecutor_DestroyInputMemory has no effect, passed nullptr to memory."); + return; + } + + if (*memory == nullptr) { + LOGW("OH_NNExecutor_DestroyInputMemory has no effect, passed nullptr to *memory."); + return; + } + + Executor *innerExecutor = reinterpret_cast(executor); + OH_NN_ReturnCode ret = innerExecutor->DestroyInputMemory(inputIndex, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("OH_NNExecutor_DestroyInputMemory failed, error happened when destroying input memory."); + return; + } + + *memory = nullptr; +} + +NNRT_API void OH_NNExecutor_DestroyOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, OH_NN_Memory **memory) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_DestroyOutputMemory failed, passed nullptr to executor."); + return; + } + + if (memory == nullptr) { + LOGW("OH_NNExecutor_DestroyOutputMemory has no effect, passed nullptr to memory."); + return; + } + + if (*memory == nullptr) { + LOGW("OH_NNExecutor_DestroyOutputMemory has no effect, passed nullptr to *memory."); + return; + } + + Executor *innerExecutor = reinterpret_cast(executor); + OH_NN_ReturnCode ret = innerExecutor->DestroyOutputMemory(outputIndex, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("OH_NNExecutor_DestroyOutputMemory failed, error happened when destroying output memory."); + return; + } + + *memory = nullptr; +} + +NNRT_API OH_NN_ReturnCode OH_NNExecutor_SetInputWithMemory(OH_NNExecutor *executor, + uint32_t inputIndex, + const OH_NN_Tensor *tensor, + const OH_NN_Memory *memory) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_SetInputWithMemory failed, passed nullptr to executor."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor == nullptr) { + LOGE("OH_NNExecutor_SetInputWithMemory failed, passed nullptr to tensor."); + return OH_NN_INVALID_PARAMETER; + } + + if (memory == nullptr) { + LOGE("OH_NNExecutor_SetInputWithMemory failed, passed nullptr to memory."); + return OH_NN_INVALID_PARAMETER; + } + + Executor *innerExecutor = reinterpret_cast(executor); + return innerExecutor->SetInputFromMemory(inputIndex, *tensor, *memory); +} + +NNRT_API OH_NN_ReturnCode OH_NNExecutor_SetOutputWithMemory(OH_NNExecutor *executor, + uint32_t outputIndex, + const OH_NN_Memory *memory) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_SetOutputWithMemory failed, passed nullptr to executor."); + return OH_NN_INVALID_PARAMETER; + } + + if (memory == nullptr) { + LOGE("OH_NNExecutor_SetOutputWithMemory failed, passed nullptr to memory."); + return OH_NN_INVALID_PARAMETER; + } + + Executor *innerExecutor = reinterpret_cast(executor); + return innerExecutor->SetOutputFromMemory(outputIndex, *memory); +} + +NNRT_API void OH_NNExecutor_Destroy(OH_NNExecutor **executor) +{ + if (executor == nullptr) { + LOGW("OH_NNExecutor_Destroy has no effect, since executor is nullptr."); + return; + } + + if ((*executor) == nullptr) { + 
LOGW("OH_NNExecutor_Destroy has no effect, since *executor is nullptr"); + return; + } + + Executor *innerExecutor = reinterpret_cast(*executor); + delete innerExecutor; + *executor = nullptr; +} + +NNRT_API OH_NN_ReturnCode OH_NNDevice_GetAllDevicesID(const size_t **allDevicesID, uint32_t *deviceCount) +{ + if (allDevicesID == nullptr) { + LOGE("OH_NNDevice_GetAllDevicesID failed, passed nullptr to allDevicesID."); + return OH_NN_INVALID_PARAMETER; + } + + if ((*allDevicesID) != nullptr) { + LOGE("OH_NNDevice_GetAllDevicesID failed, *allDevicesID should be nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + if (deviceCount == nullptr) { + LOGE("OH_NNDevice_GetAllDevicesID failed, passed nullptr to deviceCount."); + return OH_NN_INVALID_PARAMETER; + } + + DeviceManager& deviceManager = DeviceManager::GetInstance(); + const std::vector& allDevices = deviceManager.GetAllDeviceId(); + + if (allDevices.empty()) { + LOGW("OH_NNDevice_GetAllDevicesID got no device."); + *allDevicesID = nullptr; + *deviceCount = 0; + return OH_NN_SUCCESS; + } + + *allDevicesID = allDevices.data(); + // allDevices.size() will not exceed UINT32_MAX, it is safe to cast to uint32_t. + *deviceCount = static_cast(allDevices.size()); + + return OH_NN_SUCCESS; +} + +NNRT_API OH_NN_ReturnCode OH_NNDevice_GetName(size_t deviceID, const char **name) +{ + if (name == nullptr) { + LOGE("OH_NNDevice_GetName failed, passed nullptr to name."); + return OH_NN_INVALID_PARAMETER; + } + + if ((*name) != nullptr) { + LOGE("OH_NNDevice_GetName failed, *name should be nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + DeviceManager& deviceManager = DeviceManager::GetInstance(); + const std::string& deviceName = deviceManager.GetDeviceName(deviceID); + if (deviceName.empty()) { + LOGE("OH_NNDevice_GetName failed, error happened when getting name of deviceID %zu.", deviceID); + *name = nullptr; + return OH_NN_FAILED; + } + + *name = deviceName.data(); + return OH_NN_SUCCESS; +} + +NNRT_API OH_NN_ReturnCode OH_NNDevice_GetType(size_t deviceID, OH_NN_DeviceType* deviceType) +{ + DeviceManager& deviceManager = DeviceManager::GetInstance(); + std::shared_ptr device = deviceManager.GetDevice(deviceID); + if (device == nullptr) { + LOGE("OH_NNDevice_GetName failed, passed invalid deviceID."); + return OH_NN_INVALID_PARAMETER; + } + + if (deviceType == nullptr) { + LOGE("OH_NNDevice_GetType failed, passed nullptr to deviceType."); + return OH_NN_INVALID_PARAMETER; + } + + OH_NN_ReturnCode ret = device->GetDeviceType(*deviceType); + if (ret != OH_NN_SUCCESS) { + LOGE("OH_NNDevice_GetType failed, device id: %zu.", deviceID); + return ret; + } + return OH_NN_SUCCESS; +} \ No newline at end of file diff --git a/frameworks/native/nn_tensor.cpp b/frameworks/native/nn_tensor.cpp new file mode 100644 index 0000000000000000000000000000000000000000..68f392a0ad9fb33e61f51dd62173d9fdd39e5d27 --- /dev/null +++ b/frameworks/native/nn_tensor.cpp @@ -0,0 +1,409 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +#include "nn_tensor.h" +#include "validation.h" +#include "transform.h" +#include "common/log.h" +#include "mindir.h" +#include "mindir_types.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +const uint32_t SUPPORT_NUM_BIT = 8; // Currently support 8-bit quantization only +const uint32_t INVALID_NUM_BIT = 0; + +void DestroyLiteGraphTensor(void* tensor) +{ + mindspore::lite::MindIR_Tensor_Destroy(&tensor); +} + +NNTensor::~NNTensor() +{ + if (m_buffer != nullptr) { + delete [] reinterpret_cast(m_buffer); + } +} + +NNTensor::NNTensor(NNTensor&& tensor) noexcept +{ + *this = std::move(tensor); +} + +NNTensor& NNTensor::operator=(NNTensor&& tensor) noexcept +{ + if (this == &tensor) { + return *this; + } + + m_type = tensor.m_type; + m_dataType = tensor.m_dataType; + m_format = tensor.m_format; + m_name = std::move(tensor.m_name); + m_dimensions = std::move(tensor.m_dimensions); + m_quantParams = std::move(tensor.m_quantParams); + m_elementCount = tensor.m_elementCount; + m_isDynamicShape = tensor.m_isDynamicShape; + m_isOpParameter = tensor.m_isOpParameter; + m_buffer = tensor.m_buffer; + m_bufferLength = tensor.m_bufferLength; + m_dataLength = tensor.m_dataLength; + + tensor.m_buffer = nullptr; + tensor.m_bufferLength = 0; + tensor.m_dataLength = 0; + + return *this; +} + +OH_NN_ReturnCode NNTensor::Build(OH_NN_DataType dataType, + const std::vector& dimensions, + const std::vector& quantParam, + OH_NN_TensorType type) +{ + m_type = type; + + if (!Validation::ValidateTensorDataType(dataType)) { + LOGE("Build failed, passed invalid data type."); + return OH_NN_INVALID_PARAMETER; + } + m_dataType = dataType; + + OH_NN_ReturnCode ret = ParseDimensions(dimensions); + if (ret != OH_NN_SUCCESS) { + LOGE("Build failed, passed invalid dimensions."); + return ret; + } + + ret = ParseQuantParams(quantParam); + if (ret != OH_NN_SUCCESS) { + LOGE("Build failed, please check quantParam."); + return ret; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode NNTensor::BuildFromOHNNTensor(const OH_NN_Tensor& nnTensor) +{ + m_type = nnTensor.type; + + if (!Validation::ValidateTensorDataType(nnTensor.dataType)) { + LOGE("BuildFromOHNNTensor failed, passed invalid data type: %d.", nnTensor.dataType); + return OH_NN_INVALID_PARAMETER; + } + m_dataType = nnTensor.dataType; + + if (!Validation::ValidateTensorType(nnTensor.type)) { + LOGE("BuildFromOHNNTensor failed, passed invalid nnTensor type: %d.", nnTensor.type); + return OH_NN_INVALID_PARAMETER; + } + + OH_NN_ReturnCode ret = ParseDimensions(nnTensor); + if (ret != OH_NN_SUCCESS) { + LOGE("BuildFromOHNNTensor failed, passed invalid nnTensor dimensions."); + return ret; + } + + ret = ParseQuantParams(nnTensor.quantParam); + if (ret != OH_NN_SUCCESS) { + LOGE("BuildFromOHNNTensor failed, please check quantParam in nnTensor."); + return ret; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode NNTensor::ParseDimensions(const std::vector& dimensions) +{ + // Temporary variable to check overflow. 
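// Worked example of what the loop below computes: an OH_NN_FLOAT32 tensor (4 bytes per element)
// with dimensions {2, 3, 4} yields elementCount = 24 and dataLength = 96 bytes; a shape such as
// {-1, 3, 224, 224} marks the tensor as dynamic, and both values are reset to 0 afterwards.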
+ uint64_t absoluteDim {0}; + uint64_t elementCount {1}; + uint64_t dataLength {static_cast(GetTypeSize(m_dataType))}; + m_isDynamicShape = false; + for (int32_t dim : dimensions) { + if (dim < -1 || dim == 0) { + LOGE("ParseDimension failed, dimension of OH_NN_Tensor cannot be 0 or less than -1, receive %d.", dim); + return OH_NN_INVALID_PARAMETER; + } + + m_isDynamicShape = m_isDynamicShape || (dim == -1); + absoluteDim = static_cast(abs(dim)); + elementCount *= absoluteDim; + dataLength *= absoluteDim; + + if (dataLength > UINT32_MAX) { + LOGE("ParseDimension failed, expected data length of tensor exceed limit %u.", UINT32_MAX); + return OH_NN_INVALID_PARAMETER; + } + } + + if (m_isDynamicShape) { + // If tensor has dynamic shape, m_elementCount and m_dataLength take 0. + m_elementCount = 0; + m_dataLength = 0; + } else { + m_elementCount = static_cast(elementCount); + m_dataLength = static_cast(dataLength); + } + + m_dimensions = std::move(dimensions); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode NNTensor::ParseDimensions(const OH_NN_Tensor& nnTensor) +{ + OH_NN_ReturnCode ret = Validation::ValidateArray(nnTensor.dimensions, nnTensor.dimensionCount); + if (ret != OH_NN_SUCCESS) { + LOGE("BuildFromOHNNTensor failed, please check dimension and dimensionCount in NNTensor."); + return ret; + } + std::vector dimensions = ConstructVectorFromArray(nnTensor.dimensions, nnTensor.dimensionCount); + + ret = ParseDimensions(dimensions); + if (ret != OH_NN_SUCCESS) { + LOGE("BuildFromOHNNTensor failed, passed invalid dimension info."); + return ret; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode NNTensor::ParseQuantParams(const OH_NN_QuantParam* quantParam) +{ + if (quantParam == nullptr) { + return OH_NN_SUCCESS; + } + + if ((quantParam->numBits == nullptr) || (quantParam->scale == nullptr) || (quantParam->zeroPoint == nullptr)) { + LOGE("ParseQuantParams failed, scale or zeroPoint is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + std::vector tmpQuantParam; + uint32_t numBits{0}; + double scale{0.0}; + int32_t zeroPoint{0}; + for (uint32_t i = 0; i < quantParam->quantCount; i++) { + numBits = quantParam->numBits[i]; + scale = quantParam->scale[i]; + zeroPoint = quantParam->zeroPoint[i]; + tmpQuantParam.emplace_back((QuantParam){numBits, scale, zeroPoint}); + } + + OH_NN_ReturnCode ret = ParseQuantParams(tmpQuantParam); + if (ret != OH_NN_SUCCESS) { + LOGE("ParseQuantParams failed, please numBits in NNTensor."); + return ret; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode NNTensor::ParseQuantParams(const std::vector& quantParams) +{ + for (const QuantParam& param : quantParams) { + // Only support 8-bit quantization in NNR version 1.0 + if ((param.numBits != SUPPORT_NUM_BIT) || (param.numBits == INVALID_NUM_BIT)) { + LOGE("ParseQuantParams failed, get invalid numBits %d.", param.numBits); + return OH_NN_INVALID_PARAMETER; + } + } + + m_quantParams = quantParams; + return OH_NN_SUCCESS; +} + +void NNTensor::IdentifyOpParameter() +{ + m_isOpParameter = true; +} + +void NNTensor::SetName(const std::string& name) +{ + m_name = name; +} + +// Buffer set inside NNTensor will be released during deconstruction, make sure the buffer won't be released twice. 
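// For example (sketch): callers hand over heap memory allocated as a char array and give up
// ownership, because ~NNTensor() deletes m_buffer as a char[]; 'source' and 'length' below are
// illustrative names:
//
//     void* data = new (std::nothrow) char[length];
//     memcpy_s(data, length, source, length);
//     nnTensor.SetBuffer(data, length);   // nnTensor owns 'data' from here on; do not delete it again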
+void NNTensor::SetBuffer(const void* buffer, size_t length) +{ + // copy pointer instead of memory copying + m_buffer = const_cast(buffer); + m_bufferLength = length; +} + +OH_NN_ReturnCode NNTensor::SetDimensions(const std::vector& dimensions) +{ + size_t expectedDimensionCount = m_dimensions.size(); + size_t dimensionCount = dimensions.size(); + if (dimensionCount != expectedDimensionCount) { + LOGE("Passed dimensions have different dimension counts from NNTensor, expected %zu, but passed %zu.", + expectedDimensionCount, dimensionCount); + return OH_NN_INVALID_PARAMETER; + } + + auto ret = ParseDimensions(dimensions); + if (ret != OH_NN_SUCCESS) { + LOGE("SetDimemsions failed, passed invalid dimension info."); + return ret; + } + + m_dimensions = dimensions; + return OH_NN_SUCCESS; +} + +OH_NN_TensorType NNTensor::GetType() const +{ + return m_type; +} + +std::string NNTensor::GetName() const +{ + return m_name; +} + +void* NNTensor::GetBuffer() const +{ + return m_buffer; +} + +size_t NNTensor::GetBufferLength() const +{ + return m_bufferLength; +} + +size_t NNTensor::GetDataLength() const +{ + return m_dataLength; +} + +OH_NN_DataType NNTensor::GetDataType() const +{ + return m_dataType; +} + +uint32_t NNTensor::GetElementCount() const +{ + return m_elementCount; +} + +std::vector NNTensor::GetDimensions() const +{ + return m_dimensions; +} + +OH_NN_Format NNTensor::GetFormat() const +{ + return m_format; +} + +std::vector NNTensor::GetQuantParam() const +{ + return m_quantParams; +} + +LiteGraphTensorPtr NNTensor::ConvertToLiteGraphTensor() const +{ + mindspore::lite::DataType dataType = NNToMS::TransformDataType(m_dataType); + mindspore::lite::Format format = NNToMS::TransformFormat(m_format); + const uint8_t* buffer = static_cast(m_buffer); + std::vector data = ConstructVectorFromArray(buffer, m_dataLength); + + std::vector quantParams; + mindspore::lite::QuantParam msQuantParam; + for (const QuantParam& param : m_quantParams) { + msQuantParam = {param.zeroPoint, param.scale, param.numBits}; + quantParams.emplace_back(std::move(msQuantParam)); + } + + mindspore::lite::TensorPtr tensor = mindspore::lite::MindIR_Tensor_Create( + m_name, dataType, m_dimensions, format, data, quantParams); + if (tensor == nullptr) { + LOGE("ConvertToLiteGraphTensor failed, please check attributes of NNTensor."); + return {nullptr, DestroyLiteGraphTensor}; + } + + LiteGraphTensorPtr liteGraphTensor(tensor, DestroyLiteGraphTensor); + return liteGraphTensor; +} + +void NNTensor::ConvertToIOTensor(IOTensor& tensor) const +{ + tensor.dataType = m_dataType; + tensor.format = m_format; + tensor.dimensions = m_dimensions; + tensor.data = const_cast(m_buffer); + tensor.length = m_bufferLength; +} + +bool NNTensor::IsDynamicShape() const +{ + return m_isDynamicShape; +} + +bool NNTensor::IsQuantTensor() const +{ + return (m_quantParams.size() > 0); +} + +bool NNTensor::IsScalar() const +{ + return (m_dimensions.empty()); +} + +bool NNTensor::IsOpParameter() const +{ + return m_isOpParameter; +} + +bool NNTensor::CompareAttribute(const NNTensor& tensor) const +{ + if (m_dataType != tensor.GetDataType()) { + LOGI("Tensors have different data type: %d and %d.", m_dataType, tensor.GetDataType()); + return false; + } + + if (m_format != tensor.GetFormat()) { + LOGI("Tensors have different format: %d and %d.", m_format, tensor.GetFormat()); + return false; + } + + const std::vector dimensions = tensor.GetDimensions(); + if (m_dimensions.size() != dimensions.size()) { + LOGI("Tensors have differents dimension counts: 
%zu and %zu.", m_dimensions.size(), dimensions.size()); + return false; + } + + for (auto i = 0; i < dimensions.size(); i++) { + if (m_dimensions[i] != -1 && m_dimensions[i] != dimensions[i]) { + LOGI("Tensors have different dimension: dimension index: %u, dimension value: %d and %d.", + i, m_dimensions[i], dimensions[i]); + return false; + } + } + + if (m_type != tensor.GetType()) { + LOGI("Tensors have different type: %d and %d.", m_type, tensor.GetType()); + return false; + } + + return true; +} +} // NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/nn_tensor.h b/frameworks/native/nn_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..1b8cf20109996bcadc6993ec390951bbf1f5500f --- /dev/null +++ b/frameworks/native/nn_tensor.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_NN_TENSOR_H +#define NEURAL_NETWORK_RUNTIME_NN_TENSOR_H + +#include +#include + +#include "interfaces/oem/cpp_api/cpp_type.h" +#include "interfaces/kits/c/neural_network_runtime.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +using LiteGraphTensorPtr = std::unique_ptr; + +void DestroyLiteGraphTensor(void* tensor); + +class NNTensor { +public: + NNTensor() = default; + ~NNTensor(); + NNTensor(NNTensor&& tensor) noexcept; + NNTensor& operator=(NNTensor&& tensor) noexcept; + // Copy construction and assignment is not allowed in case of double-free of m_buffer + NNTensor(const NNTensor& tensor) = delete; + NNTensor& operator=(const NNTensor& tensor) = delete; + + OH_NN_ReturnCode BuildFromOHNNTensor(const OH_NN_Tensor& nnTensor); + OH_NN_ReturnCode Build(OH_NN_DataType dataType, + const std::vector& dimensions, + const std::vector& quantParam, + OH_NN_TensorType type); + void IdentifyOpParameter(); + + void SetName(const std::string& name); + void SetBuffer(const void* buffer, size_t length); + OH_NN_ReturnCode SetDimensions(const std::vector& dimensions); + + std::string GetName() const; + OH_NN_TensorType GetType() const; + void* GetBuffer() const; + // Return complete buffer length + size_t GetBufferLength() const; + // Return actual data length, since the data can be store in a larger buffer + size_t GetDataLength() const; + OH_NN_DataType GetDataType() const; + uint32_t GetElementCount() const; + std::vector GetDimensions() const; + OH_NN_Format GetFormat() const; + std::vector GetQuantParam() const; + LiteGraphTensorPtr ConvertToLiteGraphTensor() const; + void ConvertToIOTensor(IOTensor& tensor) const; + + bool IsDynamicShape() const; + bool IsQuantTensor() const; + bool IsScalar() const; + bool IsOpParameter() const; + bool CompareAttribute(const NNTensor& tensor) const; + +private: + // Used in BuildFromOHNNTensor() + OH_NN_ReturnCode ParseQuantParams(const OH_NN_QuantParam* quantParams); + OH_NN_ReturnCode ParseDimensions(const OH_NN_Tensor& nnTensor); + // Used in Build() + OH_NN_ReturnCode ParseQuantParams(const std::vector& 
quantParams); + OH_NN_ReturnCode ParseDimensions(const std::vector& dimensions); + +private: + OH_NN_TensorType m_type {OH_NN_TENSOR}; + OH_NN_DataType m_dataType {OH_NN_FLOAT32}; + OH_NN_Format m_format {OH_NN_FORMAT_NHWC}; + std::string m_name; + std::vector m_dimensions; + std::vector m_quantParams; + uint32_t m_elementCount {0}; + bool m_isDynamicShape {false}; + bool m_isOpParameter {false}; + void* m_buffer {nullptr}; + size_t m_bufferLength {0}; + size_t m_dataLength {0}; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_NN_TENSOR_H \ No newline at end of file diff --git a/frameworks/native/ops/add_builder.cpp b/frameworks/native/ops/add_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..96dd2951b2f359935712f85fcd03ba56596ceb95 --- /dev/null +++ b/frameworks/native/ops/add_builder.cpp @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "add_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Add"; + +AddBuilder::AddBuilder() {} + +AddBuilder::~AddBuilder() {} + +OH_NN_ReturnCode AddBuilder::SetActivation(std::shared_ptr& tensor) +{ + tensor->IdentifyOpParameter(); + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[Add] SetActivation failed, the activationType should be type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Add] SetActivation GetBuffer return nullptr."); + return OH_NN_INVALID_PARAMETER; + } + int8_t* fuseData = static_cast(buffer); + if (!Validation::ValidateFuseType(static_cast(*fuseData))) { + LOGE("[Add] SetActivation failed, fuse activation type is invalid."); + return OH_NN_INVALID_PARAMETER; + } + m_activationType = NNToMS::TransfromFusionType(static_cast(*fuseData)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode AddBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Add] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Add] Build failed, the input or output index of Add operation is invalid."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (uint32_t i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_ADD_ACTIVATIONTYPE: + ret = SetActivation(tensor); + break; + default: + LOGE("[Add] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (ret 
!= OH_NN_SUCCESS) { + LOGE("[Add] Build failed, passed invalid param."); + return ret; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr AddBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Add] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_AddFusion_CreatePrimitive(m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(AddBuilder, OH_NN_OPS_ADD); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/add_builder.h b/frameworks/native/ops/add_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..c08d4d9741121eab58d3d54012d9f126178875fd --- /dev/null +++ b/frameworks/native/ops/add_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_ADD_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_ADD_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class AddBuilder : public OpsBuilder { +public: + AddBuilder(); + ~AddBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetActivation(std::shared_ptr& tensor); + +private: + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_ADD_BUILDER_H diff --git a/frameworks/native/ops/argmax_builder.cpp b/frameworks/native/ops/argmax_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d40f3a88d9214a9f1aca05acf0bad40d1170fca1 --- /dev/null +++ b/frameworks/native/ops/argmax_builder.cpp @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "argmax_builder.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace Ops {
+static const int INPUT_NUM = 1;
+static const int OUTPUT_NUM = 1;
+static const std::string OP_NAME = "ArgMax";
+
+ArgMaxBuilder::ArgMaxBuilder() {}
+
+ArgMaxBuilder::~ArgMaxBuilder() {}
+
+OH_NN_ReturnCode ArgMaxBuilder::SetAxis(std::shared_ptr<NNTensor> tensor)
+{
+    tensor->IdentifyOpParameter();
+
+    if (tensor->GetDataType() != OH_NN_INT64) {
+        LOGE("[ArgMax] SetAxis failed, the axis should be type OH_NN_INT64.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    void* buffer = tensor->GetBuffer();
+    if (buffer == nullptr) {
+        LOGE("[ArgMax] SetAxis GetBuffer return nullptr.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    m_axis = *(static_cast<int64_t*>(buffer));
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode ArgMaxBuilder::SetKeepdims(std::shared_ptr<NNTensor> tensor)
+{
+    tensor->IdentifyOpParameter();
+
+    if (tensor->GetDataType() != OH_NN_BOOL) {
+        LOGE("[ArgMax] SetKeepdims failed, the keep_dims should be type OH_NN_BOOL.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    void* buffer = tensor->GetBuffer();
+    if (buffer == nullptr) {
+        LOGE("[ArgMax] SetKeepdims GetBuffer return nullptr.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+    m_keepDims = *(static_cast<bool*>(buffer));
+
+    return OH_NN_SUCCESS;
+}
+
+/**
+ * Build method.
+ * 1. Build the primitive of the op.
+ * 2. Build the inputIndex of the op.
+ * 3. Build the outputIndex of the op.
+ */
+OH_NN_ReturnCode ArgMaxBuilder::Build(const std::vector<uint32_t>& paramsIndex,
+    const std::vector<uint32_t>& inputsIndex, const std::vector<uint32_t>& outputsIndex,
+    const std::vector<std::shared_ptr<NNTensor>>& allTensors)
+{
+    if (m_isBuild) {
+        LOGE("[ArgMax] Build failed, build operation has been completed, cannot build again.");
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
+
+    OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM);
+    if (returnCode != OH_NN_SUCCESS) {
+        LOGE("[ArgMax] Build failed, passed invalid input or output index.");
+        return returnCode;
+    }
+    m_inputsIndex = inputsIndex;
+    m_outputsIndex = outputsIndex;
+
+    for (int i : paramsIndex) {
+        const std::shared_ptr<NNTensor> tensor = allTensors[i];
+        switch (tensor->GetType()) {
+            case OH_NN_ARG_MAX_AXIS:
+                returnCode = SetAxis(tensor);
+                break;
+            case OH_NN_ARG_MAX_KEEPDIMS:
+                returnCode = SetKeepdims(tensor);
+                break;
+            default:
+                LOGE("[ArgMax] Build failed, param invalid, type = %d.", tensor->GetType());
+                return OH_NN_INVALID_PARAMETER;
+        }
+        if (returnCode != OH_NN_SUCCESS) {
+            LOGE("[ArgMax] Build failed, passed invalid param.");
+            return returnCode;
+        }
+    }
+
+    // The quantization type of the first output determines that of the operator.
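+    // Note: only the axis and keep_dims parameters are configurable through OH_NN tensors here;
+    // m_topK and m_outMaxValue keep their defaults (1 and false, see argmax_builder.h) when the
+    // primitive is created in GetPrimitive(). SetQuantType() below is assumed to propagate the
+    // quant parameters of the first output tensor to this operator, as the comment above states.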
+ SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ArgMaxBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ArgMax] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_ArgMaxFusion_CreatePrimitive(m_axis, m_topK, m_keepDims, m_outMaxValue); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} +REGISTER_OPS(ArgMaxBuilder, OH_NN_OPS_ARG_MAX); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/argmax_builder.h b/frameworks/native/ops/argmax_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..73139e383129f9e94be1d7e06b9586f84344beda --- /dev/null +++ b/frameworks/native/ops/argmax_builder.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_ARGMAX_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_ARGMAX_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ArgMaxBuilder : public OpsBuilder { +public: + ArgMaxBuilder(); + ~ArgMaxBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetKeepdims(std::shared_ptr tensor); + +private: + int64_t m_axis {-1}; + int64_t m_topK {1}; + bool m_keepDims {false}; + bool m_outMaxValue {false}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_ARGMAX_BUILDER_H diff --git a/frameworks/native/ops/avgpool_builder.cpp b/frameworks/native/ops/avgpool_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..02c78a7e488264a647624b162ac65268399d1e61 --- /dev/null +++ b/frameworks/native/ops/avgpool_builder.cpp @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "avgpool_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const std::string OP_NAME = "AvgPool"; + +AvgPoolBuilder::AvgPoolBuilder() {} + +AvgPoolBuilder::~AvgPoolBuilder() {} + +OH_NN_ReturnCode AvgPoolBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + OH_NN_ReturnCode returnCode = PoolingBuild(paramsIndex, inputsIndex, outputsIndex, allTensors); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[AvgPool] Build failed, the PoolingBuild failed."); + return returnCode; + } + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr AvgPoolBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[AvgPool] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_AvgPoolFusion_CreatePrimitive(m_kernelSize, m_strides, m_pad, + m_padMode, m_roundMode, m_format, m_global, m_activationType); + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(AvgPoolBuilder, OH_NN_OPS_AVG_POOL); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/avgpool_builder.h b/frameworks/native/ops/avgpool_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..fc58d41478ef02cd080c2650855d0bc262c89009 --- /dev/null +++ b/frameworks/native/ops/avgpool_builder.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_AVGPOOL_BUILDER_H +#define NEURAL_NETWORK_AVGPOOL_BUILDER_H + +#include "pooling_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class AvgPoolBuilder : public PoolingBuilder { +public: + AvgPoolBuilder(); + ~AvgPoolBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_AVGPOOL_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/batch_to_space_nd_builder.cpp b/frameworks/native/ops/batch_to_space_nd_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b56ffcbf5526678666cd98bb2f8fdddc02c88df9 --- /dev/null +++ b/frameworks/native/ops/batch_to_space_nd_builder.cpp @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "batch_to_space_nd_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const int CROPS_ROWS = 2; +static const int CROPS_COLUMN = 2; +static const std::string OP_NAME = "BatchToSpaceND"; + +BatchToSpaceNDBuilder::BatchToSpaceNDBuilder() {} + +BatchToSpaceNDBuilder::~BatchToSpaceNDBuilder() {} + +OH_NN_ReturnCode BatchToSpaceNDBuilder::SetInputBlock(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[BatchToSpaceND] SetInputBlock failed, the BlockSize should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[BatchToSpaceND] SetInputBlock GetBuffer return nullptr."); + return OH_NN_INVALID_PARAMETER; + } + int64_t* pBlockSize = static_cast(buffer); + + uint32_t elementCount = tensor->GetElementCount(); + for (uint32_t i = 0; i < elementCount; ++i) { + m_blockSize.emplace_back(*pBlockSize); + ++pBlockSize; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode BatchToSpaceNDBuilder::SetInputCrops(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[BatchToSpaceND] SetInputCrops failed, the Crops should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[BatchToSpaceND] SetInputCrops GetBuffer return nullptr."); + return OH_NN_INVALID_PARAMETER; + } + int64_t* pCropsData = static_cast(buffer); + + std::vector> cropsData; + for (int i = 0; i < CROPS_ROWS; i++) { + std::vector vect_data; + vect_data.reserve(CROPS_COLUMN); + for (int j = 0; j < CROPS_COLUMN; j++) { + vect_data.push_back(*pCropsData++); + } + cropsData.push_back(vect_data); + } + m_crops = cropsData; + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode BatchToSpaceNDBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[BatchToSpaceND] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[BatchToSpaceND] Build failed, passed invalid input or output index."); + return returnCode; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE: + returnCode = SetInputBlock(tensor); + break; + case OH_NN_BATCH_TO_SPACE_ND_CROPS: + returnCode = SetInputCrops(tensor); + break; + default: + LOGE("[BatchToSpaceND] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (returnCode != OH_NN_SUCCESS) { + LOGE("[BatchToSpaceND] Build failed, passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that 
of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr BatchToSpaceNDBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[BatchToSpaceND] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_BatchToSpaceND_CreatePrimitive(m_blockSize, m_crops); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(BatchToSpaceNDBuilder, OH_NN_OPS_BATCH_TO_SPACE_ND); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/batch_to_space_nd_builder.h b/frameworks/native/ops/batch_to_space_nd_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..beab53a68b74ea6049d81366275ad3f4867147eb --- /dev/null +++ b/frameworks/native/ops/batch_to_space_nd_builder.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_BATCHTOSPACEND_BUILDER_H +#define NEURAL_NETWORK_BATCHTOSPACEND_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class BatchToSpaceNDBuilder : public OpsBuilder { +public: + BatchToSpaceNDBuilder(); + ~BatchToSpaceNDBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetBatchToSpaceInput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors); + OH_NN_ReturnCode SetInputBlock(std::shared_ptr tensor); + OH_NN_ReturnCode SetInputCrops(std::shared_ptr tensor); + +private: + std::vector m_blockSize; + std::vector> m_crops; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_BATCHTOSPACEND_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/batchnorm_builder.cpp b/frameworks/native/ops/batchnorm_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1fd5997f0d6ef8dee1e85077524de4a97ddaac23 --- /dev/null +++ b/frameworks/native/ops/batchnorm_builder.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "batchnorm_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 5; +static const int OUTPUT_NUM = 1; +static const int SCALAR_LENGTH = 1; +const std::string OP_NAME = "BatchNorm"; + +BatchNormBuilder::BatchNormBuilder() {} + +BatchNormBuilder::~BatchNormBuilder() {} + +OH_NN_ReturnCode BatchNormBuilder::SetEpsilon(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[BatchNorm] SetEpsilon failed, the Epsilon should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[BatchNorm] SetEpsilon failed, the Epsilon shoule be a scalar"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[BatchNorm] SetEpsilon failed, the epsilon passed a empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + m_epsilon = *static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode BatchNormBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[BatchNorm] Build failed, batchNorm operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[BatchNorm] Build failed, passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_BATCH_NORM_EPSILON: + returnCode = SetEpsilon(tensor); + break; + default: + LOGE("[BatchNorm] Parameter Type is invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[BatchNorm] BatchNorm Build failed,, Passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr BatchNormBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[BatchNorm] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_FusedBatchNorm_CreatePrimitive(m_epsilon); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(BatchNormBuilder, OH_NN_OPS_BATCH_NORM); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/batchnorm_builder.h b/frameworks/native/ops/batchnorm_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..c8b4e9c31c36a91d843d0c8a711a9d22a23df9c1 --- /dev/null +++ b/frameworks/native/ops/batchnorm_builder.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_BATHNORM_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_BATHNORM_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class BatchNormBuilder : public OpsBuilder { +public: + BatchNormBuilder(); + ~BatchNormBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetEpsilon(std::shared_ptr tensor); + +private: + float m_epsilon{1e-7}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_BATHNORM_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/bias_add_builder.cpp b/frameworks/native/ops/bias_add_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4130bd6d5f4bb002acb281c66e472c0c84072cb6 --- /dev/null +++ b/frameworks/native/ops/bias_add_builder.cpp @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "bias_add_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "BiasAdd"; + +BiasAddBuilder::BiasAddBuilder() {} + +BiasAddBuilder::~BiasAddBuilder() {} + +OH_NN_ReturnCode BiasAddBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[BiasAdd] Build failed, biasAdd operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[BiasAdd] Build failed, passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + if (!paramsIndex.empty()) { + LOGE("[BiasAdd] Build failed, expects no parameters"); + return OH_NN_INVALID_PARAMETER; + } + + // The quantization type of the first output determinies that of the operator. 
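+    // BiasAdd carries no operator parameters of its own (a non-empty paramsIndex is rejected above),
+    // so all that remains is to propagate the output quant type and mark the op as built. A minimal
+    // well-formed call is therefore (index values illustrative only):
+    //   builder.Build(/*paramsIndex*/ {}, /*inputsIndex*/ {0, 1}, /*outputsIndex*/ {2}, allTensors);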
+ SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr BiasAddBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[BiasAdd] Build failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_BiasAdd_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(BiasAddBuilder, OH_NN_OPS_BIAS_ADD); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/bias_add_builder.h b/frameworks/native/ops/bias_add_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..410bfe88aa9655ac77c16e8d7e48fa7ba332ce07 --- /dev/null +++ b/frameworks/native/ops/bias_add_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_BIASADD_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_BIASADD_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class BiasAddBuilder : public OpsBuilder { +public: + BiasAddBuilder(); + ~BiasAddBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_BIASADD_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/cast_builder.cpp b/frameworks/native/ops/cast_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..81dc1eb2b5f386bbf04fa022649b8dba146b4438 --- /dev/null +++ b/frameworks/native/ops/cast_builder.cpp @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "cast_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int INPUT_TYPE = 1; +static const std::string OP_NAME = "Cast"; + +CastBuilder::CastBuilder() {} + +CastBuilder::~CastBuilder() {} + +OH_NN_ReturnCode CastBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Cast] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Cast] Build failed, the input or output index of Cast operation is invalid."); + return ret; + } + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + auto castType = allTensors[inputsIndex[INPUT_TYPE]]->GetBuffer(); + if (castType == nullptr) { + LOGE("[Cast] Build castType GetBuffer return nullptr."); + return OH_NN_INVALID_PARAMETER; + } + OH_NN_DataType* castTypeInt = reinterpret_cast(castType); + if (!Validation::ValidateTensorDataType(*castTypeInt)) { + LOGE("[Cast] Type of cast operator is not validation."); + return OH_NN_INVALID_PARAMETER; + } + *castTypeInt = (OH_NN_DataType)NNToHDI::TransDataType(*castTypeInt); + + if (!paramsIndex.empty()) { + LOGE("[Cast] Cast expects no parameters"); + return OH_NN_INVALID_PARAMETER; + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr CastBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Cast] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Cast_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(CastBuilder, OH_NN_OPS_CAST); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/cast_builder.h b/frameworks/native/ops/cast_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..09682db5400e2a23dcb45b68cbb2c652657f3662 --- /dev/null +++ b/frameworks/native/ops/cast_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_CAST_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_CAST_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class CastBuilder : public OpsBuilder { +public: + CastBuilder(); + ~CastBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_CAST_BUILDER_H diff --git a/frameworks/native/ops/concat_builder.cpp b/frameworks/native/ops/concat_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d18411492cfeebc05b774ad8f92d5e47ce72ecaa --- /dev/null +++ b/frameworks/native/ops/concat_builder.cpp @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "concat_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static constexpr int MINIMUM_INTPUT = 2; +static constexpr int OUTPUT_NUM = 1; +static constexpr int AXIS_LENGTH = 1; +static const std::string OP_NAME = "Concat"; + +ConcatBuilder::ConcatBuilder() {} + +ConcatBuilder::~ConcatBuilder() {} + +OH_NN_ReturnCode ConcatBuilder::SetAxis(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + if (tensor->GetElementCount() != AXIS_LENGTH) { + LOGE("[Concat] SetAxis failed, the Activation shoule be a scalar"); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Concat] SetAxis failed, the axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Concat] SetAxis GetBuffer return nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_axis = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ConcatBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Concat] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (inputsIndex.size() < MINIMUM_INTPUT) { + LOGE("[Concat] Build failed, Concat need more than one inputs."); + return OH_NN_INVALID_PARAMETER; + } + + if (outputsIndex.size() != OUTPUT_NUM) { + LOGE("[Concat] Build failed, The number of index of outputs not equal to 1."); + return OH_NN_INVALID_PARAMETER; + } + + OH_NN_ReturnCode returnCode = SetInputsAndOutputs(inputsIndex, outputsIndex, allTensors); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Concat] Build failed, set inputs or outputs failed."); + return returnCode; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + 
switch (tensor->GetType()) { + case OH_NN_CONCAT_AXIS: + returnCode = SetAxis(tensor); + break; + default: + LOGE("[Concat] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Concat] Build failed, passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ConcatBuilder::SetInputsAndOutputs(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + size_t allTensorsSize = allTensors.size(); + for (auto index : inputsIndex) { + if (index >= allTensorsSize) { + LOGE("[Concat] Invalid input index, it is out of range %zu.", allTensorsSize); + return OH_NN_INVALID_PARAMETER; + } + } + + for (auto index : outputsIndex) { + if (index >= allTensorsSize) { + LOGE("[Concat] Invalid output index, it is out of range %zu.", allTensorsSize); + return OH_NN_INVALID_PARAMETER; + } + } + + m_inputsIndex.clear(); + m_inputsIndex = inputsIndex; + + m_outputsIndex.clear(); + m_outputsIndex = outputsIndex; + + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ConcatBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Concat] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Concat_CreatePrimitive(m_axis); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ConcatBuilder, OH_NN_OPS_CONCAT); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/concat_builder.h b/frameworks/native/ops/concat_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..c80a53a6e0744f1606c35f0e70e2de2ab06f5cb5 --- /dev/null +++ b/frameworks/native/ops/concat_builder.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_CONCAT_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_CONCAT_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ConcatBuilder : public OpsBuilder { +public: + ConcatBuilder(); + ~ConcatBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetInputsAndOutputs(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors); +private: + int64_t m_axis{0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_CONCAT_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/conv2d_builder.cpp b/frameworks/native/ops/conv2d_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..302f1e4c1e003dbaa67545533f97d53ca4e31f23 --- /dev/null +++ b/frameworks/native/ops/conv2d_builder.cpp @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "conv2d_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static constexpr int INPUT_NUM = 3; +static constexpr int OUTPUT_NUM = 1; +static constexpr int CONV2D_INPUT_WEIGHT = 1; +static constexpr int WEIGHT_SIZE = 4; +static constexpr int OUT_CHANNEL_INDEX = 0; +static constexpr int IN_CHANNEL_INDEX = 3; +static constexpr int KERNEL_HEIGHT_INDEX = 1; +static constexpr int KERNEL_WEIGHT_INDEX = 2; +static constexpr int PAD_MODE_GET = 1; +static constexpr int PAD_LIST_GET = 4; +static constexpr int SCALAR_LENGTH = 1; +static const std::string OP_NAME = "Conv2D"; + +Conv2DBuilder::Conv2DBuilder() {} + +Conv2DBuilder::~Conv2DBuilder() {} + +OH_NN_ReturnCode Conv2DBuilder::SetInputAndOutput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Conv2d] SetInputAndOutput failed, passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Conv2DBuilder::SetChannel(const std::vector& inputsIndex, + const std::vector>& allTensors) +{ + // set inChannel, outChannel, kernelSize + auto weightShape = allTensors[inputsIndex[CONV2D_INPUT_WEIGHT]]->GetDimensions(); + if (weightShape.size() != WEIGHT_SIZE) { + LOGE("[Conv2d] SetChannel failed, the dimension of weight should be %d", WEIGHT_SIZE); + return OH_NN_INVALID_PARAMETER; + } + + m_inChannel = weightShape[IN_CHANNEL_INDEX]; + m_outChannel = weightShape[OUT_CHANNEL_INDEX]; + + return OH_NN_SUCCESS; +} + +void Conv2DBuilder::SetKernelSize(const std::vector& inputsIndex, + const std::vector>& allTensors) +{ + // set inChannel, outChannel, kernelSize + auto weightShape = allTensors[inputsIndex[CONV2D_INPUT_WEIGHT]]->GetDimensions(); + + m_kernelSize.clear(); + m_kernelSize.emplace_back(weightShape[KERNEL_HEIGHT_INDEX]); + m_kernelSize.emplace_back(weightShape[KERNEL_WEIGHT_INDEX]); +} + +OH_NN_ReturnCode Conv2DBuilder::SetStrides(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set Strides + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Conv2d] SetStrides failed, the Strides should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Conv2d] SetStrides GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + const int64_t* pStrides = reinterpret_cast(buffer); + int stridesSize = tensor->GetElementCount(); + m_strides.assign(pStrides, pStrides + stridesSize); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Conv2DBuilder::SetDilation(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set Dilation + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Conv2d] SetDilation failed, the Dilation should have type OH_NN_INT64"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Conv2d] SetDilation GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + const int64_t* pDilation = reinterpret_cast(buffer); + int dilationSize = tensor->GetElementCount(); + m_dilation.assign(pDilation, pDilation + dilationSize); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Conv2DBuilder::SetPad(std::shared_ptr tensor) 
+{ + tensor->IdentifyOpParameter(); + + bool isPadMode = false; + if (tensor->GetElementCount() == PAD_MODE_GET) { + isPadMode = true; + } else if (tensor->GetElementCount() != PAD_LIST_GET) { + LOGE("[Conv2d] SetPad failed, inputs should be 1 for padMode and 4 for padList."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Conv2d] SetPadList GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + // Set PadMode or PadList + if (isPadMode) { + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[Conv2d] SetPad failed, the PadMode should have type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + int8_t* pPad = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidatePadMode(*pPad)) { + LOGE("[Conv2d] SetPad failed, invalid pad mode."); + return OH_NN_INVALID_PARAMETER; + } + m_padMode = NNToMS::TransformPadModeValue(*pPad); + } else { + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Conv2d] SetPad failed, the PadList should have type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + int64_t* pPadList = static_cast(buffer); + int padListSize = tensor->GetElementCount(); + m_pad.assign(pPadList, pPadList + padListSize); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Conv2DBuilder::SetGroup(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set Group + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[Conv2d] SetGroup failed, The Group shoule be a scalar"); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Conv2d] SetGroup failed, The Group should have type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Conv2d] SetGroup GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + m_group = *static_cast(buffer); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Conv2DBuilder::SetActavitation(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[Conv2d] SetActavitation failed, the ActivationType shoule be a scalar"); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[Conv2d] SetActavitation failed, the ActivationType should have type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Conv2d] SetGroup GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + int8_t* pFuseData = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast(*pFuseData))) { + LOGE("[Conv2d] SetActavitation failed, activation input is invalid."); + return OH_NN_INVALID_PARAMETER; + } + m_activationType = NNToMS::TransfromFusionType((OH_NN_FuseType)(*pFuseData)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Conv2DBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Conv2d] Build failed, Conv2D operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = SetInputAndOutput(inputsIndex, outputsIndex, allTensors); + if (returnCode != OH_NN_SUCCESS) { + return returnCode; + } + + returnCode = SetChannel(inputsIndex, allTensors); + if (returnCode != OH_NN_SUCCESS) { + return returnCode; + } + + SetKernelSize(inputsIndex, 
allTensors); + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_CONV2D_STRIDES: + returnCode = SetStrides(tensor); + break; + case OH_NN_CONV2D_DILATION: + returnCode = SetDilation(tensor); + break; + case OH_NN_CONV2D_PAD_MODE: + case OH_NN_CONV2D_PAD: + returnCode = SetPad(tensor); + break; + case OH_NN_CONV2D_GROUP: + returnCode = SetGroup(tensor); + break; + case OH_NN_CONV2D_ACTIVATION_TYPE: + returnCode = SetActavitation(tensor); + break; + default: + LOGE("[Conv2D] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Conv2D] Build failed, Passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr Conv2DBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Conv2d] GetPrimitive failed, Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = MindIR_Conv2DFusion_CreatePrimitive(m_kernelSize, m_strides, + m_dilation, m_padMode, m_pad, m_group, m_inChannel, m_outChannel, m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(Conv2DBuilder, OH_NN_OPS_CONV2D); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/conv2d_builder.h b/frameworks/native/ops/conv2d_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..412427ee9e53a1a2db7de76e47ae95ab3ced4c81 --- /dev/null +++ b/frameworks/native/ops/conv2d_builder.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_CONV2D_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_CONV2D_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class Conv2DBuilder : public OpsBuilder { +public: + Conv2DBuilder(); + ~Conv2DBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetInputAndOutput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors); + OH_NN_ReturnCode SetChannel(const std::vector& inputsIndex, + const std::vector>& allTensors); + void SetKernelSize(const std::vector& inputsIndex, + const std::vector>& allTensors); + OH_NN_ReturnCode SetStrides(std::shared_ptr tensor); + OH_NN_ReturnCode SetDilation(std::shared_ptr tensor); + OH_NN_ReturnCode SetPad(std::shared_ptr tensor); + OH_NN_ReturnCode SetGroup(std::shared_ptr tensor); + OH_NN_ReturnCode SetActavitation(std::shared_ptr tensor); + +private: + int64_t m_group{1}; + int64_t m_inChannel{0}; + int64_t m_outChannel{0}; + std::vector m_kernelSize; + std::vector m_strides; + std::vector m_pad; + std::vector m_dilation; + mindspore::lite::PadMode m_padMode{mindspore::lite::PAD_MODE_PAD}; + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_CONV2D_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/conv2d_transpose_builder.cpp b/frameworks/native/ops/conv2d_transpose_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2e7b8b060e66cd75d5a13d2685220d2ddbc6eb16 --- /dev/null +++ b/frameworks/native/ops/conv2d_transpose_builder.cpp @@ -0,0 +1,313 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "conv2d_transpose_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static constexpr int INPUT_NUM = 3; +static constexpr int OUTPUT_NUM = 1; +static constexpr int INPUT_WEIGHT = 1; +static constexpr int WEIGHT_SIZE = 4; +static constexpr int OUT_CHANNEL_INDEX = 0; +static constexpr int IN_CHANNEL_INDEX = 3; +static constexpr int KERNEL_HEIGHT_INDEX = 1; +static constexpr int KERNEL_WEIGHT_INDEX = 2; +static constexpr int PAD_MODE_PARAM_NUM = 1; +static constexpr int PAD_LIST_PARAM_NUM = 4; +static constexpr int SCALAR_LENGTH = 1; +static const std::string OP_NAME = "Conv2DTranspose"; + +Conv2DTransposeBuilder::Conv2DTransposeBuilder() {} + +Conv2DTransposeBuilder::~Conv2DTransposeBuilder() {} + +OH_NN_ReturnCode Conv2DTransposeBuilder::SetInput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Conv2dTranspose] SetInput failed, Passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // set inChannel, outChannel, kernelSize + auto weightShape = allTensors[inputsIndex[INPUT_WEIGHT]]->GetDimensions(); + if (weightShape.size() != WEIGHT_SIZE) { + LOGE("[Conv2dTranspose] SetInput failed, the dimension of weight should be %d", WEIGHT_SIZE); + return OH_NN_INVALID_PARAMETER; + } + + m_inChannel = weightShape[IN_CHANNEL_INDEX]; + m_outChannel = weightShape[OUT_CHANNEL_INDEX]; + + return OH_NN_SUCCESS; +} + +void Conv2DTransposeBuilder::SetKernelSize(const std::vector& inputsIndex, + const std::vector>& allTensors) +{ + auto weightShape = allTensors[inputsIndex[INPUT_WEIGHT]]->GetDimensions(); + + m_kernelSize.clear(); + m_kernelSize.emplace_back(weightShape[KERNEL_HEIGHT_INDEX]); + m_kernelSize.emplace_back(weightShape[KERNEL_WEIGHT_INDEX]); +} + +OH_NN_ReturnCode Conv2DTransposeBuilder::SetStrides(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set Strides + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Conv2DTranspose] SetStrides failed, the Strides should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Conv2DTranspose] SetStrides GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + const int64_t* pStrides = reinterpret_cast(buffer); + int elementSize = tensor->GetElementCount(); + m_strides.assign(pStrides, pStrides + elementSize); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Conv2DTransposeBuilder::SetDilation(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set Dilation + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Conv2DTranspose] SetDilation failed, the Dilation should be type OH_NN_INT64"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Conv2DTranspose] SetDilation GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + const int64_t* pDilation = reinterpret_cast(buffer); + int dilationSize = tensor->GetElementCount(); + m_dilation.assign(pDilation, pDilation + dilationSize); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Conv2DTransposeBuilder::SetPad(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + bool isPadMode = false; 
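+    // Note: one parameter element selects a pad mode (an OH_NN_INT8 scalar); four elements supply explicit padding values (OH_NN_INT64), as checked below.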
+    if (tensor->GetElementCount() == PAD_MODE_PARAM_NUM) {
+        isPadMode = true;
+    } else if (tensor->GetElementCount() != PAD_LIST_PARAM_NUM) {
+        LOGE("[Conv2DTranspose] SetPad failed, the element count should be 1 if using padMode or 4 if using padList.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    void* buffer = tensor->GetBuffer();
+    if (buffer == nullptr) {
+        LOGE("[Conv2DTranspose] SetPad GetBuffer return nullptr");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    // Set PadMode or PadList
+    if (isPadMode) {
+        if (tensor->GetDataType() != OH_NN_INT8) {
+            LOGE("[Conv2DTranspose] SetPad failed, the PadMode should have type OH_NN_INT8.");
+            return OH_NN_INVALID_PARAMETER;
+        }
+
+        int8_t* pPad = static_cast(buffer);
+        if (!OHOS::NeuralNetworkRuntime::Validation::ValidatePadMode(*pPad)) {
+            LOGE("[Conv2DTranspose] SetPad failed, invalid pad mode.");
+            return OH_NN_INVALID_PARAMETER;
+        }
+        m_padMode = NNToMS::TransformPadModeValue(*pPad);
+    } else {
+        if (tensor->GetDataType() != OH_NN_INT64) {
+            LOGE("[Conv2DTranspose] SetPad failed, the PadList should have type OH_NN_INT64.");
+            return OH_NN_INVALID_PARAMETER;
+        }
+
+        const int64_t* pPadList = reinterpret_cast(buffer);
+        int padListPadSize = tensor->GetElementCount();
+        m_padList.assign(pPadList, pPadList + padListPadSize);
+    }
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode Conv2DTransposeBuilder::SetGroup(std::shared_ptr tensor)
+{
+    tensor->IdentifyOpParameter();
+    // Set Group
+    if (tensor->GetElementCount() != SCALAR_LENGTH) {
+        LOGE("[Conv2DTranspose] SetGroup failed, the Group should be a scalar.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (tensor->GetDataType() != OH_NN_INT64) {
+        LOGE("[Conv2DTranspose] SetGroup failed, the Group should have type OH_NN_INT64.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    void* buffer = tensor->GetBuffer();
+    if (buffer == nullptr) {
+        LOGE("[Conv2DTranspose] SetGroup GetBuffer return nullptr");
+        return OH_NN_INVALID_PARAMETER;
+    }
+    m_group = *reinterpret_cast(buffer);
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode Conv2DTransposeBuilder::SetOutPadding(std::shared_ptr tensor)
+{
+    tensor->IdentifyOpParameter();
+    // Set outputPadding
+    if (tensor->GetDataType() != OH_NN_INT64) {
+        LOGE("[Conv2DTranspose] SetOutPadding failed, the outputPadding should have type OH_NN_INT64.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    void* buffer = tensor->GetBuffer();
+    if (buffer == nullptr) {
+        LOGE("[Conv2DTranspose] SetOutPadding GetBuffer return nullptr");
+        return OH_NN_INVALID_PARAMETER;
+    }
+    const int64_t* pOutputPadding = reinterpret_cast(buffer);
+    int outputPadSize = tensor->GetElementCount();
+    m_outputPaddings.assign(pOutputPadding, pOutputPadding + outputPadSize);
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode Conv2DTransposeBuilder::SetActivation(std::shared_ptr tensor)
+{
+    tensor->IdentifyOpParameter();
+
+    if (tensor->GetElementCount() != SCALAR_LENGTH) {
+        LOGE("[Conv2DTranspose] SetActivation failed, the ActivationType should be a scalar.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (tensor->GetDataType() != OH_NN_INT8) {
+        LOGE("[Conv2DTranspose] SetActivation failed, the ActivationType should have type OH_NN_INT8.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    void* buffer = tensor->GetBuffer();
+    if (buffer == nullptr) {
+        LOGE("[Conv2DTranspose] SetActivation GetBuffer return nullptr");
+        return OH_NN_INVALID_PARAMETER;
+    }
+    int8_t* pFuseData = static_cast(buffer);
+    if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast(*pFuseData))) {
+        LOGE("[Conv2DTranspose] SetActivation
failed, activation input is invalid."); + return OH_NN_INVALID_PARAMETER; + } + m_activationType = NNToMS::TransfromFusionType((OH_NN_FuseType)(*pFuseData)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Conv2DTransposeBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Conv2DTranspose] Build failed, conv2DTranspose operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = SetInput(inputsIndex, outputsIndex, allTensors); + if (returnCode != OH_NN_SUCCESS) { + return returnCode; + } + + SetKernelSize(inputsIndex, allTensors); + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; // 参数 tensor + switch (tensor->GetType()) { + case OH_NN_CONV2D_TRANSPOSE_STRIDES: + returnCode = SetStrides(tensor); + break; + case OH_NN_CONV2D_TRANSPOSE_DILATION: + returnCode = SetDilation(tensor); + break; + case OH_NN_CONV2D_TRANSPOSE_PAD_MODE: + case OH_NN_CONV2D_TRANSPOSE_PAD: + returnCode = SetPad(tensor); + break; + case OH_NN_CONV2D_TRANSPOSE_GROUP: + returnCode = SetGroup(tensor); + break; + case OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS: + returnCode = SetOutPadding(tensor); + break; + case OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE: + returnCode = SetActivation(tensor); + break; + default: + LOGE("[Conv2DTranspose] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Conv2DTranspose] Build failed, passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr Conv2DTransposeBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Conv2DTranspose] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = MindIR_Conv2dTransposeFusion_CreatePrimitive(m_kernelSize, + m_strides, m_dilation, m_padMode, m_padList, m_group, m_inChannel, m_outChannel, + m_activationType, m_outputPaddings); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(Conv2DTransposeBuilder, OH_NN_OPS_CONV2D_TRANSPOSE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/conv2d_transpose_builder.h b/frameworks/native/ops/conv2d_transpose_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..f54cb509ae41d1a8cba413db658bad24e9379801 --- /dev/null +++ b/frameworks/native/ops/conv2d_transpose_builder.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_CONV2DTRANSPOSE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_CONV2DTRANSPOSE_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class Conv2DTransposeBuilder : public OpsBuilder { +public: + Conv2DTransposeBuilder(); + ~Conv2DTransposeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetInput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors); + void SetKernelSize(const std::vector& inputsIndex, + const std::vector>& allTensors); + OH_NN_ReturnCode SetStrides(std::shared_ptr tensor); + OH_NN_ReturnCode SetDilation(std::shared_ptr tensor); + OH_NN_ReturnCode SetPad(std::shared_ptr tensor); + OH_NN_ReturnCode SetGroup(std::shared_ptr tensor); + OH_NN_ReturnCode SetOutPadding(std::shared_ptr tensor); + OH_NN_ReturnCode SetActivation(std::shared_ptr tensor); + +private: + int64_t m_group{1}; + int64_t m_inChannel{0}; + int64_t m_outChannel{0}; + std::vector m_kernelSize; + std::vector m_strides; + std::vector m_padList; + std::vector m_dilation; + std::vector m_outputPaddings; + mindspore::lite::PadMode m_padMode{mindspore::lite::PAD_MODE_PAD}; + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_CONV2DTRANSPOSE_BUILDER_H diff --git a/frameworks/native/ops/depthwise_conv2d_native_builder.cpp b/frameworks/native/ops/depthwise_conv2d_native_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..51a2066c7f78c15a41bc288c30914735847d5433 --- /dev/null +++ b/frameworks/native/ops/depthwise_conv2d_native_builder.cpp @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "depthwise_conv2d_native_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 3; +static const int OUTPUT_NUM = 1; +static const int PAD_MODE_SIZE = 1; +static const int PAD_LIST_SIZE = 4; +static const int IN_CHANNEL_IN_INPUT = 3; +static const int OUT_CHANNEL_IN_WEIGHT = 0; +static const int HEIGHT_IN_WEIGHT = 1; +static const int WIDTH_IN_WEIGHT = 2; +static const int INPUT_RANK = 4; +static const int INPUT_X = 0; +static const int INPUT_WEIGHT = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "DepthwiseConv2DNative"; + +DepthwiseConv2DNativeBuilder::DepthwiseConv2DNativeBuilder() {} + +DepthwiseConv2DNativeBuilder::~DepthwiseConv2DNativeBuilder() {} + +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetIsPadMode(std::shared_ptr tensor, + bool &isPadMode) +{ + if (tensor->GetElementCount() == PAD_MODE_SIZE) { + isPadMode = true; + } else if (tensor->GetElementCount() != PAD_LIST_SIZE) { + LOGE("[DepthwiseConv2DNative] The element size of padMode should be 1 or " + "the element size of padList should be 4."); + return OH_NN_INVALID_PARAMETER; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetActivation(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set ActivationType + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[DepthwiseConv2DNative] SetActivation failed, the Activation should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[DepthwiseConv2DNative] SetActivation failed, the activationType should have type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DepthwiseConv2DNative] SetActivation GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + int8_t* pFuseData = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast(*pFuseData))) { + LOGE("[DepthwiseConv2DNative] SetActivation failed, activation input is invalid."); + return OH_NN_INVALID_PARAMETER; + } + m_activationType = NNToMS::TransfromFusionType((OH_NN_FuseType)(*pFuseData)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetKernelSize(const std::vector& inputsIndex, + const std::vector>& allTensors) +{ + // Set kernleSize and outChannel + auto weightShape = allTensors[inputsIndex[INPUT_WEIGHT]]->GetDimensions(); + if (weightShape.size() != INPUT_RANK) { + LOGE("[DepthwiseConv2DNative] SetKernelSize failed, invalid rank of shape of weight, should be 4 dimensions."); + return OH_NN_INVALID_PARAMETER; + } + + m_outChannel = weightShape[OUT_CHANNEL_IN_WEIGHT]; + m_kernelSize.clear(); + m_kernelSize.emplace_back(weightShape[HEIGHT_IN_WEIGHT]); + m_kernelSize.emplace_back(weightShape[WIDTH_IN_WEIGHT]); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetStrides(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[DepthwiseConv2DNative] SetStrides failed, the stride should have type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DepthwiseConv2DNative] SetStrides GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + const int64_t* pStrides = reinterpret_cast(buffer); + int 
stridesSize = tensor->GetElementCount(); + m_strides.assign(pStrides, pStrides + stridesSize); + + return OH_NN_SUCCESS; +} +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetDilation(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[DepthwiseConv2DNative] SetDilation failed, the dilation should have type OH_NN_INT64"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DepthwiseConv2DNative] SetDilation GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + const int64_t* pDilation = reinterpret_cast(buffer); + int dilationSize = tensor->GetElementCount(); + m_dilation.assign(pDilation, pDilation + dilationSize); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetPadModeOrPaddings( + std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + bool isPadMode = false; + OH_NN_ReturnCode ret = SetIsPadMode(tensor, isPadMode); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DepthwiseConv2DNative] SetPad GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + if (isPadMode) { + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[DepthwiseConv2DNative] SetPadModeOrPaddings failed, the padMode should have type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + int8_t* pPad = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidatePadMode(*pPad)) { + LOGE("[DepthwiseConv2DNative] SetPadModeOrPaddings failed, invalid pad mode."); + return OH_NN_INVALID_PARAMETER; + } + m_padMode = NNToMS::TransformPadModeValue(*pPad); + } else { + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[DepthwiseConv2DNative] SetPadModeOrPaddings failed, the padList should have type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + const int64_t* pPadList = reinterpret_cast(buffer); + int padListSize = tensor->GetElementCount(); + m_pad.assign(pPadList, pPadList + padListSize); + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetInputAndOutput( + const std::vector& inputsIndex, const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[DepthwiseConv2DNative] SetInputAndOutput failed, passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[DepthwiseConv2DNative] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode ret = SetInputAndOutput(inputsIndex, outputsIndex, allTensors); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + auto inputShape = allTensors[inputsIndex[INPUT_X]]->GetDimensions(); + if (inputShape.size() != INPUT_RANK) { + LOGE("[DepthwiseConv2DNative] Build failed, invalid rank of shape of input, should be 4 dimensions."); + return OH_NN_INVALID_PARAMETER; + } + m_inChannel = inputShape[IN_CHANNEL_IN_INPUT]; + // Set Kernel Size + ret = SetKernelSize(inputsIndex, allTensors); + if (ret != OH_NN_SUCCESS) { + 
LOGE("[DepthwiseConv2DNative] Build failed, SetKernelSize failed."); + return ret; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; // 参数 tensor + switch (tensor->GetType()) { + case OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES: + ret = SetStrides(tensor); + break; + case OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION: + ret = SetDilation(tensor); + break; + case OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE: + case OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD: + ret = SetPadModeOrPaddings(tensor); + break; + case OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE: + ret = SetActivation(tensor); + break; + default: + LOGE("[DepthwiseConv2DNative] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (ret != OH_NN_SUCCESS) { + LOGE("[DepthwiseConv2DNative] Build failed, passed invalid param."); + return ret; + } + } + + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr DepthwiseConv2DNativeBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[DepthwiseConv2DNative] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = MindIR_Conv2DFusion_CreatePrimitive(m_kernelSize, m_strides, + m_dilation, m_padMode, m_pad, m_inChannel, m_inChannel, m_outChannel, m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(DepthwiseConv2DNativeBuilder, OH_NN_OPS_DEPTHWISE_CONV2D_NATIVE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/depthwise_conv2d_native_builder.h b/frameworks/native/ops/depthwise_conv2d_native_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..f1663f4694331048c7a1dddea66031244649717a --- /dev/null +++ b/frameworks/native/ops/depthwise_conv2d_native_builder.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_DEPTHWISE_CONV2D_NATIVE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_DEPTHWISE_CONV2D_NATIVE_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class DepthwiseConv2DNativeBuilder : public OpsBuilder { +public: + DepthwiseConv2DNativeBuilder(); + ~DepthwiseConv2DNativeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetInputAndOutput(const std::vector& inputsIndex, + const std::vector& outputsIndex, const std::vector>& allTensors); + OH_NN_ReturnCode SetIsPadMode(std::shared_ptr tensor, + bool &isPadMode); + OH_NN_ReturnCode SetPadModeOrPaddings(std::shared_ptr tensor); + OH_NN_ReturnCode SetKernelSize(const std::vector& inputsIndex, + const std::vector>& allTensors); + OH_NN_ReturnCode SetDilation(std::shared_ptr tensor); + OH_NN_ReturnCode SetStrides(std::shared_ptr tensor); + OH_NN_ReturnCode SetActivation(std::shared_ptr tensor); + +private: + int64_t m_inChannel{0}; + int64_t m_outChannel{0}; + std::vector m_kernelSize; + std::vector m_strides; + std::vector m_pad; + std::vector m_dilation; + mindspore::lite::PadMode m_padMode{mindspore::lite::PAD_MODE_PAD}; + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_DEPTHWISE_CONV2D_NATIVE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/div_builder.cpp b/frameworks/native/ops/div_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..17dd34e26f8d7e5bcfba0bd5866b811e2cc67e3b --- /dev/null +++ b/frameworks/native/ops/div_builder.cpp @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "div_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static constexpr int SCALAR_LENGTH = 1; +static const std::string OP_NAME = "Div"; + +DivBuilder::DivBuilder() {} + +DivBuilder::~DivBuilder() {} + +OH_NN_ReturnCode DivBuilder::SetActicationType(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[Div] SetActicationType failed, the Activation shoule be a scalar"); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[Div] SetActicationType failed, the activation should be type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Div] SetActivation GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + const int8_t* fuseData = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast(*fuseData))) { + LOGE("[Div] SetActicationType failed, fuse activation type is invalid"); + return OH_NN_INVALID_PARAMETER; + } + auto fuseType = (OH_NN_FuseType)(*fuseData); + m_activationType = NNToMS::TransfromFusionType(fuseType); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DivBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Div] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Div] Build failed, passed invalid input or output index."); + return returnCode; + } + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_DIV_ACTIVATIONTYPE: + returnCode = SetActicationType(tensor); + break; + default: + LOGE("[Div] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Div] Build failed, passed invalid param."); + return returnCode; + } + } + + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr DivBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Div] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_DivFusion_CreatePrimitive(m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(DivBuilder, OH_NN_OPS_DIV); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/div_builder.h b/frameworks/native/ops/div_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..3c0905c51874484a065421a8ca56d600194c4942 --- /dev/null +++ b/frameworks/native/ops/div_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_DIV_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_DIV_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class DivBuilder : public OpsBuilder { +public: + DivBuilder(); + ~DivBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetActicationType(std::shared_ptr tensor); + +private: + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_DIV_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/eltwise_builder.cpp b/frameworks/native/ops/eltwise_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..df6b649518ec80f1be3b145ba32dd0799335a35c --- /dev/null +++ b/frameworks/native/ops/eltwise_builder.cpp @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "eltwise_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static constexpr int SCALAR_LENGTH = 1; +static const std::string OP_NAME = "Eltwise"; + +EltwiseBuilder::EltwiseBuilder() {} + +EltwiseBuilder::~EltwiseBuilder() {} + +OH_NN_ReturnCode EltwiseBuilder::SetMode(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[Eltwise] SetMode failed, the EltwiseMode should be type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[Eltwise] SetMode failed, the eltwiseMode shoule be a scalar"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Eltwise] SetMode GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + int8_t eltwiseMode = *static_cast(buffer); + if (eltwiseMode < mindspore::lite::ELTWISE_MODE_PROD || + eltwiseMode > mindspore::lite::ELTWISE_MODE_UNKNOWN) { + LOGE("[Eltwise] SetMode failed, passed invalid eltwiseMode, received %d", eltwiseMode); + return OH_NN_INVALID_PARAMETER; + } + m_mode = (mindspore::lite::EltwiseMode)eltwiseMode; + + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode EltwiseBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Eltwise] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Eltwise] Build failed, passed invalid input index or output indices."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_ELTWISE_MODE: + returnCode = SetMode(tensor); + break; + default: + LOGE("[Eltwise] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Eltwise] Build failed, passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr EltwiseBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Eltwise] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Eltwise_CreatePrimitive(m_mode); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(EltwiseBuilder, OH_NN_OPS_ELTWISE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/eltwise_builder.h b/frameworks/native/ops/eltwise_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..09ff0d13fd5d4217fc5ce4f5b8a2800934881f94 --- /dev/null +++ b/frameworks/native/ops/eltwise_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_ELTWISE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_ELTWISE_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class EltwiseBuilder : public OpsBuilder { +public: + EltwiseBuilder(); + ~EltwiseBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetMode(std::shared_ptr tensor); + +private: + mindspore::lite::EltwiseMode m_mode{mindspore::lite::ELTWISE_MODE_PROD}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_ELTWISE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/expandims_builder.cpp b/frameworks/native/ops/expandims_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..095db7bd461c79cc35f0c2c309e1ee3cc6406c80 --- /dev/null +++ b/frameworks/native/ops/expandims_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "expandims_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "ExpandDims"; + +ExpandDimsBuilder::ExpandDimsBuilder() {} + +ExpandDimsBuilder::~ExpandDimsBuilder() {} + +OH_NN_ReturnCode ExpandDimsBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ExpandDims] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[ExpandDims] Build failed, the input or output index of ExpandDims operation is invalid."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + if (!paramsIndex.empty()) { + LOGE("[ExpandDims] Build failed, expandDims expects no parameters"); + return OH_NN_INVALID_PARAMETER; + } + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ExpandDimsBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ExpandDims] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_ExpandDims_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ExpandDimsBuilder, OH_NN_OPS_EXPAND_DIMS); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/expandims_builder.h b/frameworks/native/ops/expandims_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..4e16b9b76b0c8b656a6b0d049de696697144daf1 --- /dev/null +++ b/frameworks/native/ops/expandims_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_EXPANDDIMS_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_EXPANDDIMS_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ExpandDimsBuilder : public OpsBuilder { +public: + ExpandDimsBuilder(); + ~ExpandDimsBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_EXPANDDIMS_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/fill_builder.cpp b/frameworks/native/ops/fill_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..83c085cd0ffa3285aafe522d0048bce6b4991d21 --- /dev/null +++ b/frameworks/native/ops/fill_builder.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "fill_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Fill"; + +FillBuilder::FillBuilder() {} + +FillBuilder::~FillBuilder() {} + +OH_NN_ReturnCode FillBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Fill] Build failed, fill operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Fill] Fill Build failed, Passed invalid input or output indices."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Fill] Build failed, fill expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determinies that of the operator. 
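+    // Fill itself carries no operator attributes (paramsIndex is required to be empty above), so only the tensor indices need to be stored before the primitive is created.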
+ SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr FillBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Fill] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Fill_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(FillBuilder, OH_NN_OPS_FILL); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/fill_builder.h b/frameworks/native/ops/fill_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..8e9f2241eed6f72548beb6d2176a617a52cd12a0 --- /dev/null +++ b/frameworks/native/ops/fill_builder.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_FILL_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_FILL_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class FillBuilder : public OpsBuilder { +public: + FillBuilder(); + ~FillBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_FILL_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/fullconnection_builder.cpp b/frameworks/native/ops/fullconnection_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ce464a3f2f120b81c0fc3f1e2707fb3599280096 --- /dev/null +++ b/frameworks/native/ops/fullconnection_builder.cpp @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "fullconnection_builder.h"
+
+#include "frameworks/native/transform.h"
+#include "frameworks/native/validation.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace Ops {
+static constexpr int INPUT_WITH_AXIS = 2;
+static constexpr int INPUT_WITHOUT_AXIS = 1;
+static constexpr int OUTPUT_NUM = 1;
+static constexpr int SCALAR_LENGTH = 1;
+static const std::string OP_NAME = "FullConnection";
+
+FullConnectionBuilder::FullConnectionBuilder() {}
+
+FullConnectionBuilder::~FullConnectionBuilder() {}
+
+OH_NN_ReturnCode FullConnectionBuilder::SetFullConnectionInput(const std::vector& inputsIndex,
+    const std::vector& outputsIndex,
+    const std::vector>& allTensors)
+{
+    if (outputsIndex.size() != OUTPUT_NUM) {
+        LOGE("[FullConnection] SetFullConnectionInput failed, the number of outputs is not equal to %d.", OUTPUT_NUM);
+        return OH_NN_INVALID_PARAMETER;
+    }
+    size_t allTensorsSize = allTensors.size();
+    for (auto index : inputsIndex) {
+        if (index >= allTensorsSize) {
+            LOGE("[FullConnection] SetFullConnectionInput failed, the index of inputs is out of range.");
+            return OH_NN_INVALID_PARAMETER;
+        }
+    }
+
+    m_inputsIndex = inputsIndex;
+    m_outputsIndex = outputsIndex;
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode FullConnectionBuilder::SetFullConnectionActivation(std::shared_ptr tensor)
+{
+    tensor->IdentifyOpParameter();
+    // Set Activation
+    if (tensor->GetElementCount() != SCALAR_LENGTH) {
+        LOGE("[FullConnection] SetFullConnectionActivation failed, the Activation should be a scalar.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (tensor->GetDataType() != OH_NN_INT8) {
+        LOGE("[FullConnection] SetFullConnectionActivation failed, the Activation should have type OH_NN_INT8.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    void* buffer = tensor->GetBuffer();
+    if (buffer == nullptr) {
+        LOGE("[FullConnection] SetFullConnectionActivation GetBuffer return nullptr");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    int8_t* pFuseData = static_cast(buffer);
+    if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast(*pFuseData))) {
+        LOGE("[FullConnection] SetFullConnectionActivation failed, activation input is invalid.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+    m_activationType = NNToMS::TransfromFusionType((OH_NN_FuseType)(*pFuseData));
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode FullConnectionBuilder::SetAxis(std::shared_ptr tensor)
+{
+    if (m_useAxis) {
+        tensor->IdentifyOpParameter();
+
+        if (tensor->GetElementCount() != SCALAR_LENGTH) {
+            LOGE("[FullConnection] SetAxis failed, the axis should be a scalar.");
+            return OH_NN_INVALID_PARAMETER;
+        }
+
+        if (tensor->GetDataType() != OH_NN_INT64) {
+            LOGE("[FullConnection] SetAxis failed, the Axis should have type OH_NN_INT64.");
+            return OH_NN_INVALID_PARAMETER;
+        }
+
+        void* buffer = tensor->GetBuffer();
+        if (buffer == nullptr) {
+            LOGE("[FullConnection] SetAxis GetBuffer return nullptr");
+            return OH_NN_INVALID_PARAMETER;
+        }
+
+        m_axis = *static_cast(buffer);
+    }
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode FullConnectionBuilder::Build(const std::vector& paramsIndex,
+    const std::vector& inputsIndex,
+    const std::vector& outputsIndex,
+    const std::vector>& allTensors)
+{
+    if (m_isBuild) {
+        LOGE("[FullConnection] Build failed, operation has been built, cannot build again.");
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
+
+    bool useAxis = false;
+    if (paramsIndex.size() == INPUT_WITH_AXIS) {
+        useAxis = true;
+    } else if (paramsIndex.size() !=
INPUT_WITHOUT_AXIS) { + LOGE("[FullConnection] Build failed, the index of inputs should equal to %d if axis used or %d if not.", + INPUT_WITH_AXIS, INPUT_WITHOUT_AXIS); + return OH_NN_INVALID_PARAMETER; + } + + OH_NN_ReturnCode returnCode = SetFullConnectionInput(inputsIndex, outputsIndex, allTensors); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[FullConnection] Build failed, SetFullConnectionInput failed."); + return returnCode; + } + + // Set axis + m_useAxis = useAxis; + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; // 参数 tensor + switch (tensor->GetType()) { + case OH_NN_FULL_CONNECTION_AXIS: + returnCode = SetAxis(tensor); + break; + case OH_NN_FULL_CONNECTION_ACTIVATIONTYPE: + returnCode = SetFullConnectionActivation(tensor); + break; + default: + LOGE("[FullConnection] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (returnCode != OH_NN_SUCCESS) { + LOGE("[FullConnection] Build failed, passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr FullConnectionBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[FullConnection] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_FullConnection_CreatePrimitive(m_hasBias, m_useAxis, + m_axis, m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(FullConnectionBuilder, OH_NN_OPS_FULL_CONNECTION); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/fullconnection_builder.h b/frameworks/native/ops/fullconnection_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..50eaa38c62b200d947efe1e3e3ff8a21b50dad0e --- /dev/null +++ b/frameworks/native/ops/fullconnection_builder.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_FULLCONNECTION_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_FULLCONNECTION_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class FullConnectionBuilder : public OpsBuilder { +public: + FullConnectionBuilder(); + ~FullConnectionBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetFullConnectionInput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors); + OH_NN_ReturnCode SetFullConnectionActivation(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + +private: + bool m_hasBias{true}; + bool m_useAxis{false}; + int64_t m_axis{0}; + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_FULLCONNECTION_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/gather_builder.cpp b/frameworks/native/ops/gather_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..aba0bfc790c0170a824133f0c7a252768c025074 --- /dev/null +++ b/frameworks/native/ops/gather_builder.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "gather_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 3; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Gather"; + +GatherBuilder::GatherBuilder() {} + +GatherBuilder::~GatherBuilder() {} + +OH_NN_ReturnCode GatherBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Gather] Gather Build failed, gather operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Gather] Build failed, the input or output index of Gather operation is invalid."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Gather] Build failed, gather expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determinies that of the operator. 
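+    // Gather receives all of its operands as inputs rather than attributes, which is why an empty paramsIndex is enforced above.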
+ SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr GatherBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Gather] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Gather_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(GatherBuilder, OH_NN_OPS_GATHER); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/gather_builder.h b/frameworks/native/ops/gather_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..7fe35dfb8e077a44213db9ea556227a940d4ca0f --- /dev/null +++ b/frameworks/native/ops/gather_builder.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_GATHER_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_GATHER_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class GatherBuilder : public OpsBuilder { +public: + GatherBuilder(); + ~GatherBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // HETERNEURAL_NETWORK_GATHER_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/gelu_builder.cpp b/frameworks/native/ops/gelu_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0f7592200f9848e956b103508720e08a97b70028 --- /dev/null +++ b/frameworks/native/ops/gelu_builder.cpp @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "gelu_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUMS = 1; +static const int OUTPUT_NUMS = 1; +static const std::string OP_NAME = "Gelu"; + +GeluBuilder::GeluBuilder() {} + +GeluBuilder::~GeluBuilder() {} + +OH_NN_ReturnCode GeluBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Gelu] Build failed, operation has been build, cannot build again"); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUMS, OUTPUT_NUMS); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Gelu] Build failed, passed invalid input or output indices."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Gelu] Build failed, gelu expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr GeluBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Gelu] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_GELU; + float alpha = 0.0f; + float minVal = 0.0f; + float maxVal = 0.0f; + bool approximate = false; + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, + alpha, minVal, maxVal, approximate); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(GeluBuilder, OH_NN_OPS_GELU); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/gelu_builder.h b/frameworks/native/ops/gelu_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..6f0346a13fcddf7dcd519e9ca6b5bc00c061f42e --- /dev/null +++ b/frameworks/native/ops/gelu_builder.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_GELU_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_GELU_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class GeluBuilder : public OpsBuilder { +public: + GeluBuilder(); + ~GeluBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_GELU_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/hswish_builder.cpp b/frameworks/native/ops/hswish_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3a9cae54263edff7221daa2c02a0a6160a9613d2 --- /dev/null +++ b/frameworks/native/ops/hswish_builder.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "hswish_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUMS = 1; +static const int OUTPUT_NUMS = 1; +static const std::string OP_NAME = "Hswish"; + +HswishBuilder::HswishBuilder() {} + +HswishBuilder::~HswishBuilder() {} + +OH_NN_ReturnCode HswishBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Hswish] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUMS, OUTPUT_NUMS); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Hswish] Build failed, passed invalid input or output indices."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Hswish] Build failed, hswish expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determinies that of the operator. 
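+ // Hswish carries no operator attributes, so Build() only records the tensor indices and the
+ // quantization type; the HSWISH activation kind itself is fixed later in GetPrimitive().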
+ SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr HswishBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Hswish] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_HSWISH; + float alpha = 0.0f; + float minVal = 0.0f; + float maxVal = 0.0f; + bool approximate = false; + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, + alpha, minVal, maxVal, approximate); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + + return graphPrimitivePtr; +} + +REGISTER_OPS(HswishBuilder, OH_NN_OPS_HSWISH); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/hswish_builder.h b/frameworks/native/ops/hswish_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..0d9a9050f5b64245613d24bbb9be42849d4b2802 --- /dev/null +++ b/frameworks/native/ops/hswish_builder.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_HSWISH_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_HSWISH_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class HswishBuilder : public OpsBuilder { +public: + HswishBuilder(); + ~HswishBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_HSWISH_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/layernorm_builder.cpp b/frameworks/native/ops/layernorm_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a8938ab457b9169e8c795d932f983f4a99c54ada --- /dev/null +++ b/frameworks/native/ops/layernorm_builder.cpp @@ -0,0 +1,206 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "layernorm_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 3; +static const int OUTPUT_NUM = 1; +static const int INPUT_X = 0; +static const int INPUT_GAMMA = 1; +static const int INPUT_BETA = 2; +static const std::string OP_NAME = "LayerNorm"; + +LayerNormBuilder::LayerNormBuilder() {} + +LayerNormBuilder::~LayerNormBuilder() {} + +OH_NN_ReturnCode LayerNormBuilder::SetBeginNormAxis(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT32) { + LOGE("[LayerNormBuilder] SetBeginNormAxis failed. The has_bias should be type OH_NN_INT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (!tensor->IsScalar()) { + LOGE("[LayerNormBuilder] SetBeginNormAxis failed. The beginNormAxis should be a scalar value."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[LayerNormBuilder] SetBeginNormAxis failed, the beginNormAxis passed a empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + m_beginNormAxis = *static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode LayerNormBuilder::SetEpsilon(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[LayerNormBuilder] SetEpsilon failed. The epsilon should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (!tensor->IsScalar()) { + LOGE("[LayerNormBuilder] SetEpsilon failed. The epsilon should be a scalar value."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[LayerNormBuilder] SetEpsilon failed, the epsilon passed a empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + m_epsilon = *static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode LayerNormBuilder::SetBeginParamsAxis(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT32) { + LOGE("[LayerNormBuilder] SetBeginParamsAxis failed. The has_bias should be type OH_NN_INT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (!tensor->IsScalar()) { + LOGE("[LayerNormBuilder] SetBeginParamsAxis failed. The beginNormAxis should be a scalar value."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[LayerNormBuilder] SetBeginParamsAxis failed, the beginParamsAxis passed a empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + m_beginParamsAxis = *static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode LayerNormBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[LayerNormBuilder] Build failed. LayerNorm operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[LayerNormBuilder] Build failed. 
Passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_LAYER_NORM_BEGIN_NORM_AXIS: + returnCode = SetBeginNormAxis(tensor); + break; + case OH_NN_LAYER_NORM_EPSILON: + returnCode = SetEpsilon(tensor); + break; + case OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS: + returnCode = SetBeginParamsAxis(tensor); + break; + default: + LOGE("[LayerNormBuilder] Parameter Type is invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[LayerNormBuilder] Build failed. Passed invalid param."); + return returnCode; + } + } + + auto inputShape = allTensors[inputsIndex[INPUT_X]]->GetDimensions(); + int inputShapeSize = static_cast(inputShape.size()); + // beginNormAxis must great than 1, because normal shape cannot equal input shape. + if (m_beginNormAxis >= inputShapeSize || m_beginNormAxis < 1) { + LOGE("[LayerNormBuilder] Build failed, invalid beginNormAxis value, it should be [1, rank(input))."); + return OH_NN_INVALID_PARAMETER; + } + // validate gamma and beta shape + returnCode = ValidateGammaAndBetaShape(inputsIndex, m_beginNormAxis, allTensors); + if (returnCode != OH_NN_SUCCESS) { + return returnCode; + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr LayerNormBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[LayerNormBuilder] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_LayerNormFusion_CreatePrimitive(m_beginNormAxis, + m_epsilon, m_elementwiseAffine, m_beginParamsAxis); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +OH_NN_ReturnCode LayerNormBuilder::ValidateGammaAndBetaShape(const std::vector& inputsIndex, + int beginAxis, const std::vector>& allTensors) const +{ + auto inputShape = allTensors[inputsIndex[INPUT_X]]->GetDimensions(); + auto gammaShape = allTensors[inputsIndex[INPUT_GAMMA]]->GetDimensions(); + auto betaShape = allTensors[inputsIndex[INPUT_BETA]]->GetDimensions(); + int inputShapeSize = static_cast(inputShape.size()); + + for (auto i = beginAxis; i < inputShapeSize; i++) { + if (gammaShape[i - beginAxis] != inputShape[i]) { + LOGE("[LayerNormBuilder] Invalid gamma shape, gamma shape should equal to normalized shape."); + return OH_NN_INVALID_PARAMETER; + } + if (betaShape[i - beginAxis] != inputShape[i]) { + LOGE("[LayerNormBuilder] Invalid beta shape, bata shape should equal to normalized shape."); + return OH_NN_INVALID_PARAMETER; + } + } + + return OH_NN_SUCCESS; +} + +REGISTER_OPS(LayerNormBuilder, OH_NN_OPS_LAYER_NORM); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/layernorm_builder.h b/frameworks/native/ops/layernorm_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..68847b7b04648ab3b44ed5cc04d9bc5c19f6f8b7 --- /dev/null +++ b/frameworks/native/ops/layernorm_builder.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_LAYERNORM_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_LAYERNORM_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class LayerNormBuilder : public OpsBuilder { +public: + LayerNormBuilder(); + ~LayerNormBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetBeginNormAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetEpsilon(std::shared_ptr tensor); + OH_NN_ReturnCode SetBeginParamsAxis(std::shared_ptr tensor); + OH_NN_ReturnCode ValidateGammaAndBetaShape(const std::vector& inputsIndex, + int beginAxis, const std::vector>& allTensors) const; + +private: + int m_beginNormAxis{1}; + float m_epsilon{1e-7}; + bool m_elementwiseAffine{false}; + int m_beginParamsAxis{1}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_LAYERNORM_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/lessequal_builder.cpp b/frameworks/native/ops/lessequal_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a74445efc744385694e0a8796acf732998a9416e --- /dev/null +++ b/frameworks/native/ops/lessequal_builder.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "lessequal_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUMS = 2; +static const int OUTPUT_NUMS = 1; +static const std::string OP_NAME = "LessEqual"; + +LessEqualBuilder::LessEqualBuilder() {} + +LessEqualBuilder::~LessEqualBuilder() {} + +OH_NN_ReturnCode LessEqualBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[LessEqual] Build failded, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUMS, OUTPUT_NUMS); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[LessEqual] Build failded, Passed invalid input or output indices."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[LessEqual] LessEqual expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr LessEqualBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[LessEqual] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_LessEqual_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(LessEqualBuilder, OH_NN_OPS_LESS_EQUAL); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/lessequal_builder.h b/frameworks/native/ops/lessequal_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..6933ffed2869e714db6b9d5f3f018a05bac3a933 --- /dev/null +++ b/frameworks/native/ops/lessequal_builder.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_LESSEQUAL_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_LESSEQUAL_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class LessEqualBuilder : public OpsBuilder { +public: + LessEqualBuilder(); + ~LessEqualBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_LESSEQUAL_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/matmul_builder.cpp b/frameworks/native/ops/matmul_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9b0440f5970c4a1504dd2c130eb2b3f4e20922ac --- /dev/null +++ b/frameworks/native/ops/matmul_builder.cpp @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "matmul_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "Matmul"; + +MatmulBuilder::MatmulBuilder() {} + +MatmulBuilder::~MatmulBuilder() {} + +OH_NN_ReturnCode MatmulBuilder::SetTransposeA(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[Matmul] Matmul SetTransposeA failed. The transposeA should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[Matmul] Matmul SetTransposeA failed. The transposeA should have type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Matmul] SetTransposeA failed, the transposeA passed a empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + m_transposeA = *static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode MatmulBuilder::SetTransposeB(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[Matmul] Matmul SetTransposeB failed. The transposeB should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[Matmul] Matmul SetTransposeB failed. 
The transposeB TransposeY should have type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Matmul] SetTransposeB failed, the transposeB passed a empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + m_transposeB = *static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode MatmulBuilder::SetActivationType(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[Matmul] Matmul SetActivationType failed. The shape of activation should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[Matmul] Matmul SetActivationType failed. The activation should be type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Matmul] SetActivationType failed, the activationType passed a empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + int8_t* fuseData = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast(*fuseData))) { + LOGE("[Matmul] Matmul SetActivationType failed. Fuse activation type is invalid"); + return OH_NN_INVALID_PARAMETER; + } + + auto fuseType = (OH_NN_FuseType)(*fuseData); + m_activationType = NNToMS::TransfromFusionType(fuseType); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode MatmulBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Matmul] Matmul Build failed. operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Matmul] Matmul Build failed. Passed invalid input or output indices."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_MATMUL_TRANSPOSE_A: + returnCode = SetTransposeA(tensor); + break; + case OH_NN_MATMUL_TRANSPOSE_B: + returnCode = SetTransposeB(tensor); + break; + case OH_NN_MATMUL_ACTIVATION_TYPE: + returnCode = SetActivationType(tensor); + break; + default: + LOGE("[Matmul] Parameter Type is invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Matmul] Matmul Build failed. Passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr MatmulBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Matmul] Matmul GetPrimitive failed. 
Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_MatMulFusion_CreatePrimitive(m_transposeA, m_transposeB, m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(MatmulBuilder, OH_NN_OPS_MATMUL); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/matmul_builder.h b/frameworks/native/ops/matmul_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..1efcdf14a848fd53cef6a57405cc51451e495d26 --- /dev/null +++ b/frameworks/native/ops/matmul_builder.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_MATMUL_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_MATMUL_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class MatmulBuilder : public OpsBuilder { +public: + MatmulBuilder(); + ~MatmulBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetTransposeA(std::shared_ptr tensor); + OH_NN_ReturnCode SetTransposeB(std::shared_ptr tensor); + OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + +private: + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + bool m_transposeA{false}; + bool m_transposeB{false}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_MATMUL_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/maximum_builder.cpp b/frameworks/native/ops/maximum_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b681ec1cc8fcfae48a8cef854cd4cf55db1da4c2 --- /dev/null +++ b/frameworks/native/ops/maximum_builder.cpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "maximum_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Maximum"; + +MaximumBuilder::MaximumBuilder() {} + +MaximumBuilder::~MaximumBuilder() {} + +OH_NN_ReturnCode MaximumBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Maximum] Maximum Build failed. operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Maximum] Maximum Build failed. The input or output index of Maximum operation is invalid."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[Maximum] Maximum Build failed. Maximum expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr MaximumBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Maximum] Maximum GetPrimitive failed. Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Maximum_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(MaximumBuilder, OH_NN_OPS_MAXIMUM); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/maximum_builder.h b/frameworks/native/ops/maximum_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..5308c5d4eecb4a2f851658283dc5537c41311ac9 --- /dev/null +++ b/frameworks/native/ops/maximum_builder.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_MAXIMUM_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_MAXIMUM_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class MaximumBuilder : public OpsBuilder { +public: + MaximumBuilder(); + ~MaximumBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} + +#endif // NEURAL_NETWORK_RUNTIME_MAXIMUM_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/maxpool_builder.cpp b/frameworks/native/ops/maxpool_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..04b61676c8f3b59d2f71a03619a1d280b8d18f43 --- /dev/null +++ b/frameworks/native/ops/maxpool_builder.cpp @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "maxpool_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const std::string OP_NAME = "MaxPool"; + +MaxPoolBuilder::MaxPoolBuilder() {} + +MaxPoolBuilder::~MaxPoolBuilder() {} + +OH_NN_ReturnCode MaxPoolBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + OH_NN_ReturnCode returnCode = PoolingBuild(paramsIndex, inputsIndex, outputsIndex, allTensors); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[MaxPool] Build failed, PoolingBuild failed."); + return returnCode; + } + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr MaxPoolBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[MaxPool] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = MindIR_MaxPoolFusion_CreatePrimitive(m_kernelSize, m_strides, m_pad, + m_padMode, m_format, m_global, m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(MaxPoolBuilder, OH_NN_OPS_MAX_POOL); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/maxpool_builder.h b/frameworks/native/ops/maxpool_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..2022b6efa3ac53213cb585102fd0d001f691fe1c --- /dev/null +++ b/frameworks/native/ops/maxpool_builder.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_MAXPOOL_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_MAXPOOL_BUILDER_H + +#include "pooling_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class MaxPoolBuilder : public PoolingBuilder { +public: + MaxPoolBuilder(); + ~MaxPoolBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_MAXPOOL_BUILDER_H diff --git a/frameworks/native/ops/mul_builder.cpp b/frameworks/native/ops/mul_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f055d285386269a194224d647b59c66ad626a927 --- /dev/null +++ b/frameworks/native/ops/mul_builder.cpp @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mul_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "Mul"; + +MulBuilder::MulBuilder() {} + +MulBuilder::~MulBuilder() {} + +OH_NN_ReturnCode MulBuilder::SetActivationType(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[Mul] Mul SetActivationType failed. The shape of activation should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[Mul] Mul SetActivationType failed. The activation should be type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Mul] SetActivationType failed, the activationType passed a empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + int8_t* fuseData = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast(*fuseData))) { + LOGE("[Mul] Mul SetActivationType failed. 
Fuse activation type is invalid"); + return OH_NN_INVALID_PARAMETER; + } + + auto fuseType = (OH_NN_FuseType)(*fuseData); + m_activationType = NNToMS::TransfromFusionType(fuseType); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode MulBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Mul] Mul build failed. operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Mul] Mul build failed. Passed invalid input or output index of Mul operation index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_MUL_ACTIVATION_TYPE: + returnCode = SetActivationType(tensor); + break; + default: + LOGE("[Mul] Parameter Type is invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Mul] Mul build failed. Passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr MulBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Mul] Mul GetPrimitive failed. Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_MulFusion_CreatePrimitive(m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(MulBuilder, OH_NN_OPS_MUL); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/mul_builder.h b/frameworks/native/ops/mul_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..25cd5ac0dd6cef5608aa73137dd9d3a8a1b469df --- /dev/null +++ b/frameworks/native/ops/mul_builder.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_MUL_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_MUL_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class MulBuilder : public OpsBuilder { +public: + MulBuilder(); + ~MulBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + +private: + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} + +#endif // NEURAL_NETWORK_RUNTIME_MUL_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/onehot_builder.cpp b/frameworks/native/ops/onehot_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b06840863c63c4342c2f087841de9d50940fe029 --- /dev/null +++ b/frameworks/native/ops/onehot_builder.cpp @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "onehot_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 4; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Onehot"; + +OnehotBuilder::OnehotBuilder() {} + +OnehotBuilder::~OnehotBuilder() {} + +OH_NN_ReturnCode OnehotBuilder::SetAxis(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Onehot] Onehot SetAxis failed. The axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Onehot] SetAxis failed, the axis passed a empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + m_axis = *static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode OnehotBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Onehot] Onehot build failed. operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Onehot] Onehot build failed. 
Passed invalid input or output index of Onehot operation index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_ONE_HOT_AXIS: + returnCode = SetAxis(tensor); + break; + default: + LOGE("[Onehot] Parameter Type is invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Onehot] Onehot Build failed. Passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr OnehotBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Onehot] Onehot GetPrimitive failed. Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_OneHot_CreatePrimitive(m_axis); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(OnehotBuilder, OH_NN_OPS_ONE_HOT); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/ops/onehot_builder.h b/frameworks/native/ops/onehot_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..537855bd4ec538612790399c7fe4d92ca239bc4b --- /dev/null +++ b/frameworks/native/ops/onehot_builder.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_ONEHOT_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_ONEHOT_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class OnehotBuilder : public OpsBuilder { +public: + OnehotBuilder(); + ~OnehotBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + +private: + int64_t m_axis{-1}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // OHOS + +#endif // NEURAL_NETWORK_RUNTIME_ONEHOT_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/pad_builder.cpp b/frameworks/native/ops/pad_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b14d8a63343ee922631539c931eb82bac1f1b52f --- /dev/null +++ b/frameworks/native/ops/pad_builder.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "pad_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "Pad"; + +PadBuilder::PadBuilder() {} + +PadBuilder::~PadBuilder() {} + +OH_NN_ReturnCode PadBuilder::SetConstantValue(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[Pad] Pad SetConstantValue failed. The constant_value should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[Pad] Pad SetConstantValue failed. The constant_value should be type OH_NN_FLOAT32"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Pad] SetConstantValue failed, the constantValue passed an empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + m_constantValue = *static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode PadBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Pad] Pad Build failed. operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Pad] Pad Build failed. Passed invalid input or output index of Pad operation index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_PAD_CONSTANT_VALUE: + returnCode = SetConstantValue(tensor); + break; + default: + LOGE("[Pad] Parameter Type is invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Pad] Pad Build failed. Passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} +LiteGraphPrimitvePtr PadBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Pad] GetPrimitive failed. 
Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + mindspore::lite::PaddingMode padding_mode = mindspore::lite::PADDING_MODE_CONSTANT; + void* primitive = MindIR_PadFusion_CreatePrimitive(paddings, padding_mode, m_constantValue); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(PadBuilder, OH_NN_OPS_PAD); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespcae OHOS \ No newline at end of file diff --git a/frameworks/native/ops/pad_builder.h b/frameworks/native/ops/pad_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..a5968a42ecb8735627855d61d2839fd3fb0d39a1 --- /dev/null +++ b/frameworks/native/ops/pad_builder.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_PAD_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_PAD_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class PadBuilder : public OpsBuilder { +public: + PadBuilder(); + ~PadBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetConstantValue(std::shared_ptr tensor); + +private: + std::vector> paddings{}; + float m_constantValue{0.0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_PAD_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/pooling_builder.cpp b/frameworks/native/ops/pooling_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9b52c8f643bae6fcf9450986c2838d7956f69d76 --- /dev/null +++ b/frameworks/native/ops/pooling_builder.cpp @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "pooling_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const int NUM_ELEMENT_PAD_MODE = 1; +static const int NUM_ELEMENT_PAD_LIST = 4; +static const int ACTIVATION_LENGTH = 1; + +OH_NN_ReturnCode PoolingBuilder::PoolingBuild(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[PoolingBuilder] PoolingBuild failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + // Set input and output + OH_NN_ReturnCode returnCode = SetInputAndOutput(inputsIndex, outputsIndex, allTensors); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[PoolingBuilder] PoolingBuild failed, the SetInputAndOutput failed."); + return returnCode; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_AVG_POOL_KERNEL_SIZE: + case OH_NN_MAX_POOL_KERNEL_SIZE: + returnCode = SetKernel(tensor); + break; + case OH_NN_AVG_POOL_STRIDE: + case OH_NN_MAX_POOL_STRIDE: + returnCode = SetStrides(tensor); + break; + case OH_NN_AVG_POOL_PAD_MODE: + case OH_NN_MAX_POOL_PAD_MODE: + case OH_NN_MAX_POOL_PAD: + case OH_NN_AVG_POOL_PAD: + returnCode = SetPadModeOrPaddings(tensor); + break; + case OH_NN_AVG_POOL_ACTIVATION_TYPE: + case OH_NN_MAX_POOL_ACTIVATION_TYPE: + returnCode = SetActivation(tensor); + break; + default: + LOGE("[PoolingBuilder] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (returnCode != OH_NN_SUCCESS) { + LOGE("[PoolingBuilder] PoolingBuild failed, passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. 
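+ // m_isBuild and m_name are intentionally not set here; the derived builders (e.g.
+ // MaxPoolBuilder::Build) set them after PoolingBuild() returns successfully.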
+ SetQuantType(outputsIndex, allTensors); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode PoolingBuilder::SetInputAndOutput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[PoolingBuilder] SetInputAndOutput failed, passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode PoolingBuilder::SetKernel(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set kernelSize + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[PoolingBuilder] SetKernel failed, the KernelSize should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[PoolingBuilder] SetKernel GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + const int64_t* pKernelSize = reinterpret_cast(buffer); + int kernelSize = tensor->GetElementCount(); + m_kernelSize.assign(pKernelSize, pKernelSize + kernelSize); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode PoolingBuilder::SetStrides(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set Strides + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[PoolingBuilder] SetStrides failed, the Strides should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[PoolingBuilder] SetStrides GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + const int64_t* pStrides = reinterpret_cast(buffer); + int strideslSize = tensor->GetElementCount(); + m_strides.assign(pStrides, pStrides + strideslSize); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode PoolingBuilder::SetPadModeOrPaddings(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[PoolingBuilder] SetPadModeOrPaddings GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + size_t tensorElementCount = tensor->GetElementCount(); + // Set PadMode or PadList + if (tensorElementCount == NUM_ELEMENT_PAD_MODE) { + // PadMode + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[PoolingBuilder] SetPadModeOrPaddings failed, the type of padMode should be OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + int8_t* pPadMode = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidatePadMode(*pPadMode)) { + LOGE("[PoolingBuilder] SetPadModeOrPaddings failed, invalid pad mode."); + return OH_NN_INVALID_PARAMETER; + } + m_padMode = NNToMS::TransformPadModeValue(*pPadMode); + } else if (tensorElementCount == NUM_ELEMENT_PAD_LIST) { + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[PoolingBuilder] SetPadModeOrPaddings failed, the type of padList should be OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + int64_t* pPad = static_cast(buffer); + // PadList + m_pad.clear(); + for (int i = 0; i < NUM_ELEMENT_PAD_LIST; i++) { + m_pad.emplace_back(static_cast(pPad[i])); + } + } else { + LOGE("[PoolingBuilder] SetPadModeOrPaddings failed, invalid element size of padMode or padList," + "padMode should be single value, and padList should be 4."); + return OH_NN_INVALID_PARAMETER; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode PoolingBuilder::SetActivation(std::shared_ptr tensor) +{ + 
tensor->IdentifyOpParameter(); + // Set ActivationType + if (tensor->GetElementCount() != ACTIVATION_LENGTH) { + LOGE("[PoolingBuilder] SetActivation failed, the Activation should be a scalar"); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[PoolingBuilder] SetActivation failed, the ActivationType should be type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[PoolingBuilder] SetActivation GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + int8_t* pFuseData = static_cast<int8_t*>(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast<OH_NN_FuseType>(*pFuseData))) { + LOGE("[PoolingBuilder] SetActivation failed, activation input is invalid."); + return OH_NN_INVALID_PARAMETER; + } + auto fuseType = (OH_NN_FuseType)(*pFuseData); + m_activationType = NNToMS::TransfromFusionType(fuseType); + + return OH_NN_SUCCESS; +} +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/pooling_builder.h b/frameworks/native/ops/pooling_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..4a42b4a2674ccfc026d3a595f0aaf5ff6d79bd31 --- /dev/null +++ b/frameworks/native/ops/pooling_builder.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_POOLING_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_POOLING_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class PoolingBuilder : public OpsBuilder { +public: + PoolingBuilder() = default; + virtual ~PoolingBuilder() = default; + + OH_NN_ReturnCode PoolingBuild(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector& paramsIndex, + const std::vector>& allTensors); + + OH_NN_ReturnCode SetInputAndOutput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors); + + OH_NN_ReturnCode SetKernel(std::shared_ptr tensor); + OH_NN_ReturnCode SetStrides(std::shared_ptr tensor); + OH_NN_ReturnCode SetPadModeOrPaddings(std::shared_ptr tensor); + OH_NN_ReturnCode SetActivation(std::shared_ptr tensor); + +protected: + std::vector m_kernelSize; + std::vector m_pad; + std::vector m_strides; + mindspore::lite::PadMode m_padMode{mindspore::lite::PAD_MODE_PAD}; + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + mindspore::lite::RoundMode m_roundMode = mindspore::lite::ROUND_MODE_FLOOR; + mindspore::lite::Format m_format = mindspore::lite::FORMAT_NCHW; + bool m_global = false; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_POOLING_BUILDER_H diff --git a/frameworks/native/ops/pow_builder.cpp b/frameworks/native/ops/pow_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..71e9df7fe6db128a09935ce8869d005605b8afa8 --- /dev/null +++ b/frameworks/native/ops/pow_builder.cpp @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
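PoolingBuilder deliberately leaves Build and GetPrimitive to its subclasses and only supplies the shared PoolingBuild/Set* helpers. A minimal sketch of how a concrete pooling builder could sit on top of it is below; the MaxPoolSketchBuilder name and the MindIR_MaxPoolFusion_CreatePrimitive call mentioned in the comment are assumptions for illustration, not code from this patch.

class MaxPoolSketchBuilder : public PoolingBuilder {
public:
    OH_NN_ReturnCode Build(const std::vector<uint32_t>& paramsIndex,
                           const std::vector<uint32_t>& inputsIndex,
                           const std::vector<uint32_t>& outputsIndex,
                           const std::vector<std::shared_ptr<NNTensor>>& allTensors) override
    {
        // Reuse the shared parameter parsing implemented by PoolingBuild().
        OH_NN_ReturnCode returnCode = PoolingBuild(paramsIndex, inputsIndex, outputsIndex, allTensors);
        if (returnCode != OH_NN_SUCCESS) {
            return returnCode;
        }
        m_name = "MaxPool";   // assumes OpsBuilder exposes m_name/m_isBuild to subclasses, as the .cpp above does
        m_isBuild = true;
        return OH_NN_SUCCESS;
    }

    LiteGraphPrimitvePtr GetPrimitive() override
    {
        if (!m_isBuild) {
            return {nullptr, DestroyLiteGraphPrimitive};
        }
        // A real subclass would create a MindIR pooling primitive here, e.g. via
        // MindIR_MaxPoolFusion_CreatePrimitive(m_kernelSize, m_strides, m_pad, m_padMode,
        //                                      m_roundMode, m_format, m_global, m_activationType);
        return {nullptr, DestroyLiteGraphPrimitive};
    }
};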
+ */ + +#include "pow_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Pow"; + +PowBuilder::PowBuilder() {} + +PowBuilder::~PowBuilder() {} + +OH_NN_ReturnCode PowBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Pow] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Pow] Build failed, passed invalid input or output index of Pow operation index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Pow] Build failed, pow expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr PowBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Pow] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + float scale{1.0}; + float shift{0.0}; + + void* primitive = mindspore::lite::MindIR_PowFusion_CreatePrimitive(scale, shift); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(PowBuilder, OH_NN_OPS_POW); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/pow_builder.h b/frameworks/native/ops/pow_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..10ec7279d86b60dba114b86ceb643f77dac1775e --- /dev/null +++ b/frameworks/native/ops/pow_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
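Since Pow carries no operator attributes, its whole lifecycle is just Build followed by GetPrimitive. A test-style sketch of that call order, assuming the tensor table has been prepared elsewhere (MakeAllTensors is a placeholder helper, not an API in this repository):

// Sketch only: indices refer to entries in a previously prepared tensor table.
OHOS::NeuralNetworkRuntime::Ops::PowBuilder powBuilder;
std::vector<uint32_t> paramsIndex {};        // Pow expects no parameter tensors
std::vector<uint32_t> inputsIndex {0, 1};    // base, exponent
std::vector<uint32_t> outputsIndex {2};
std::vector<std::shared_ptr<OHOS::NeuralNetworkRuntime::NNTensor>> allTensors = MakeAllTensors();  // assumed helper
if (powBuilder.Build(paramsIndex, inputsIndex, outputsIndex, allTensors) == OH_NN_SUCCESS) {
    auto primitive = powBuilder.GetPrimitive();  // non-null only after a successful Build
}

With the scale of 1.0 and shift of 0.0 that GetPrimitive passes, the fused primitive should reduce to a plain element-wise power.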
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_POW_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_POW_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class PowBuilder : public OpsBuilder { +public: + PowBuilder(); + ~PowBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_POW_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/prelu_builder.cpp b/frameworks/native/ops/prelu_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d8fcc56c98d89ca51611d5ed860d6c54b5e43de4 --- /dev/null +++ b/frameworks/native/ops/prelu_builder.cpp @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "prelu_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUMS = 2; +static const int OUTPUT_NUMS = 1; +static const std::string OP_NAME = "PRelu"; + +PReluBuilder::PReluBuilder() {} + +PReluBuilder::~PReluBuilder() {} + +OH_NN_ReturnCode PReluBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[PRelu] Build failed, the PRelu operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUMS, OUTPUT_NUMS); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[PRelu] Build failed, passed invalid input or output index of PRelu operation index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[PRelu] Build failed, the PRelu expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr PReluBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[PRelu] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + bool channelShared{false}; + void* primitive = mindspore::lite::MindIR_PReLUFusion_CreatePrimitive(channelShared); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(PReluBuilder, OH_NN_OPS_PRELU); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/prelu_builder.h b/frameworks/native/ops/prelu_builder.h new file mode 100644 index 
0000000000000000000000000000000000000000..117343c7c90ab86255eda31cab0a628b2cf817f2 --- /dev/null +++ b/frameworks/native/ops/prelu_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_PRELU_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_PRELU_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class PReluBuilder : public OpsBuilder { +public: + PReluBuilder(); + ~PReluBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_PRELU_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/quant_dtype_cast_builder.cpp b/frameworks/native/ops/quant_dtype_cast_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..79ce59c493a342aec5660ac200268ab2df3a015d --- /dev/null +++ b/frameworks/native/ops/quant_dtype_cast_builder.cpp @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "quant_dtype_cast_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "QuantDTypeCast"; + +QuantDTypeCastBuilder::QuantDTypeCastBuilder() {} + +QuantDTypeCastBuilder::~QuantDTypeCastBuilder() {} + +OH_NN_ReturnCode QuantDTypeCastBuilder::SetSrcT(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[QuantDTypeCast] SetSrcT failed, the src_t should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[QuantDTypeCast] SetSrcT failed, the src_t passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_src_t = static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode QuantDTypeCastBuilder::SetDstT(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[QuantDTypeCast] SetDstT failed, the dst_t should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[QuantDTypeCast] SetDstT failed, the dst_t passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_dst_t = static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode QuantDTypeCastBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[QuantDTypeCast] Build failed, the QuantDTypeCast operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[QuantDTypeCast] Build failed, passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (uint32_t i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_QUANT_DTYPE_CAST_SRC_T: + returnCode = SetSrcT(tensor); + break; + case OH_NN_QUANT_DTYPE_CAST_DST_T: + returnCode = SetDstT(tensor); + break; + default: + LOGE("[QuantDTypeCast] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[QuantDTypeCast] Build failed, passed invalid param."); + return returnCode; + } + } + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr QuantDTypeCastBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[QuantDTypeCast] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_QuantDTypeCast_CreatePrimitive(*m_src_t, *m_dst_t); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(QuantDTypeCastBuilder, OH_NN_OPS_QUANT_DTYPE_CAST); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/quant_dtype_cast_builder.h b/frameworks/native/ops/quant_dtype_cast_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..bd4a1b323b0486fac2fad129cb98e1fd9fcd052f --- /dev/null +++ b/frameworks/native/ops/quant_dtype_cast_builder.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_QUANTDTYPECAST_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_QUANTDTYPECAST_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class QuantDTypeCastBuilder : public OpsBuilder { +public: + QuantDTypeCastBuilder(); + ~QuantDTypeCastBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetSrcT(std::shared_ptr tensor); + OH_NN_ReturnCode SetDstT(std::shared_ptr tensor); + +private: + const uint64_t* m_src_t{nullptr}; + const uint64_t* m_dst_t{nullptr}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_QUANTDTYPECAST_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/reduceall_builder.cpp b/frameworks/native/ops/reduceall_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fbf0406a1a6d8e1afc0a4122337b6d4acc32142e --- /dev/null +++ b/frameworks/native/ops/reduceall_builder.cpp @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "reduceall_builder.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "ReduceAll"; + +ReduceAllBuilder::ReduceAllBuilder() {} + +ReduceAllBuilder:: ~ReduceAllBuilder() {} + +OH_NN_ReturnCode ReduceAllBuilder::SetKeepDims(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceAll] SetKeepDims failed, the keep_dims dimensions should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ReduceAll] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceAll] SetKeepDims failed, the keep_dims passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_keepDims = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceAllBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ReduceAll] Build failed, the ReduceAll operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceAll] Build failed, passed invalid input or output index of ReduceAll operation index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (uint32_t i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_REDUCE_ALL_KEEP_DIMS: + returnCode = SetKeepDims(tensor); + break; + default: + LOGE("[ReduceAll] Build failed, parameter type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceAll] Build failed, passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ReduceAllBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ReduceAll] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + bool reduceToEnd{false}; + float coeff{0.0f}; + + void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(m_keepDims, m_mode, reduceToEnd, coeff); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ReduceAllBuilder, OH_NN_OPS_REDUCE_ALL); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/reduceall_builder.h b/frameworks/native/ops/reduceall_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..141d69b0f5c0338d9912adf7f00550e07b2602ea --- /dev/null +++ b/frameworks/native/ops/reduceall_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_REDUCEALL_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_REDUCEALL_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ReduceAllBuilder : public OpsBuilder { +public: + ReduceAllBuilder(); + ~ReduceAllBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + +private: + bool m_keepDims{false}; + mindspore::lite::ReduceMode m_mode{mindspore::lite::REDUCE_MODE_ALL}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_REDUCEALL_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/reducemean_builder.cpp b/frameworks/native/ops/reducemean_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5f60e0570ac71248a4c6543357aeb60c2dbc44ce --- /dev/null +++ b/frameworks/native/ops/reducemean_builder.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "reducemean_builder.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "ReduceMean"; + +ReduceMeanBuilder::ReduceMeanBuilder() {} + +ReduceMeanBuilder:: ~ReduceMeanBuilder() {} + +OH_NN_ReturnCode ReduceMeanBuilder::SetKeepDims(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceMean] SetKeepDims failed, the keep_dims dimensions should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ReduceMean] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceMean] SetKeepDims failed, the keep_dims passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_keepDims = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceMeanBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ReduceMean] Build failed, the ReduceMean operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceMean] Build failed, passed invalid input or output index of ReduceMean operation index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (uint32_t i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_REDUCE_MEAN_KEEP_DIMS: + returnCode = SetKeepDims(tensor); + break; + default: + LOGE("[ReduceMean] Build failed, parameter type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceMean] Build failed, passed invalid param."); + return returnCode; + } + } + + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ReduceMeanBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ReduceMean] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + bool reduceToEnd{false}; + float coeff{0.0f}; + + void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(m_keepDims, m_mode, reduceToEnd, coeff); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ReduceMeanBuilder, OH_NN_OPS_REDUCE_MEAN); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/reducemean_builder.h b/frameworks/native/ops/reducemean_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..7ddc3b30cdc0c12ede4ffefab2480fb35519e2d2 --- /dev/null +++ b/frameworks/native/ops/reducemean_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_REDUCEMEAN_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_REDUCEMEAN_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ReduceMeanBuilder : public OpsBuilder { +public: + ReduceMeanBuilder(); + ~ReduceMeanBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + +private: + bool m_keepDims{false}; + mindspore::lite::ReduceMode m_mode{mindspore::lite::REDUCE_MODE_MEAN}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_REDUCEMEAN_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/reduceprod_builder.cpp b/frameworks/native/ops/reduceprod_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e22b2c5cc68f89a162229a2f0b076401d601db1f --- /dev/null +++ b/frameworks/native/ops/reduceprod_builder.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "reduceprod_builder.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "ReduceProd"; + +ReduceProdBuilder::ReduceProdBuilder() {} + +ReduceProdBuilder:: ~ReduceProdBuilder() {} + +OH_NN_ReturnCode ReduceProdBuilder::SetKeepDims(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceProd] SetKeepDims failed, the keep_dims dimensions should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ReduceProd] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceProd] SetKeepDims failed, the keep_dims passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_keepDims = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceProdBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ReduceProd] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceProd] Build failed, passed invalid input or output index of ReduceProd operation index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (uint32_t i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_REDUCE_PROD_KEEP_DIMS: + returnCode = SetKeepDims(tensor); + break; + default: + LOGE("[ReduceProd] Build failed, parameter type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceProd] Build failed, passed invalid param."); + return returnCode; + } + } + + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ReduceProdBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ReduceProd] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + bool reduceToEnd{false}; + float coeff{0.0f}; + + void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(m_keepDims, m_mode, reduceToEnd, coeff); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ReduceProdBuilder, OH_NN_OPS_REDUCE_PROD); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/reduceprod_builder.h b/frameworks/native/ops/reduceprod_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..b1314aab3ed098681888e4a9799912ea6990b70f --- /dev/null +++ b/frameworks/native/ops/reduceprod_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_REDUCEPROD_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_REDUCEPROD_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ReduceProdBuilder : public OpsBuilder { +public: + ReduceProdBuilder(); + ~ReduceProdBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + +private: + bool m_keepDims{false}; + mindspore::lite::ReduceMode m_mode{mindspore::lite::REDUCE_MODE_PROD}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_REDUCEPROD_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/relu6_builder.cpp b/frameworks/native/ops/relu6_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..eaa347492605e14e425c8b5ca32a3b1202f80f81 --- /dev/null +++ b/frameworks/native/ops/relu6_builder.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
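The three reduce builders above (ReduceAll, ReduceMean, ReduceProd) are identical apart from the ReduceMode they hand to MindIR_ReduceFusion_CreatePrimitive and the parameter enum they accept; each takes two inputs (data and axes) plus one boolean scalar keep_dims parameter. As a worked illustration of that flag: reducing an input of shape [2, 3, 4] over axis 1 should yield shape [2, 1, 4] with keep_dims set to true and [2, 4] with it set to false.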
+ */ + +#include "relu6_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Relu6"; + +Relu6Builder::Relu6Builder() {} + +Relu6Builder::~Relu6Builder() {} + +OH_NN_ReturnCode Relu6Builder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Relu6] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Relu6] Build failed, passed invalid input or output indices."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Relu6] Build failed, the Relu6 expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr Relu6Builder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Relu6] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + float alpha{0.0f}; + float minVal{0.0f}; + float maxVal{0.0f}; + bool approximate{false}; + mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_RELU6}; + + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, + minVal, maxVal, approximate); + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(Relu6Builder, OH_NN_OPS_RELU6); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/relu6_builder.h b/frameworks/native/ops/relu6_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..71f61966c8f44334c4f0a1afb6265052f68d384f --- /dev/null +++ b/frameworks/native/ops/relu6_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
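For reference, the RELU6 activation selected above clamps each element to [0, 6], i.e. relu6(x) = min(max(x, 0), 6). The alpha, minVal and maxVal arguments are simply left at their zero defaults, and only the activation type distinguishes this primitive from the plain Relu one that follows.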
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_RELU6_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_RELU6_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class Relu6Builder : public OpsBuilder { +public: + Relu6Builder(); + ~Relu6Builder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_RELU6_BUILDER_H diff --git a/frameworks/native/ops/relu_builder.cpp b/frameworks/native/ops/relu_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6f397feaeed3653fd17d538fa8f2739d0afb92df --- /dev/null +++ b/frameworks/native/ops/relu_builder.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "relu_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUMS = 1; +static const int OUTPUT_NUMS = 1; +static const std::string OP_NAME = "Relu"; + +ReluBuilder::ReluBuilder() {} + +ReluBuilder::~ReluBuilder() {} + +OH_NN_ReturnCode ReluBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Relu] Build failed, the Relu operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUMS, OUTPUT_NUMS); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Relu] Build failed, passed invalid input or output indices."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Relu] Build failed, the Relu expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ReluBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Relu] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + float alpha{0.0f}; + float minVal{0.0f}; + float maxVal{0.0f}; + bool approximate{false}; + mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_RELU}; + + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, + minVal, maxVal, approximate); + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ReluBuilder, OH_NN_OPS_RELU); +} // namespace Ops +} // namespace 
NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/relu_builder.h b/frameworks/native/ops/relu_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..9c8b9d73a0db02fe6af5b0a20dcbff0b621bf809 --- /dev/null +++ b/frameworks/native/ops/relu_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_RELU_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_RELU_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ReluBuilder : public OpsBuilder { +public: + ReluBuilder(); + ~ReluBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_RELU_BUILDER_H diff --git a/frameworks/native/ops/reshape_builder.cpp b/frameworks/native/ops/reshape_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e1469bcc71b0287d53f9a5385203ce7ca1b49d9d --- /dev/null +++ b/frameworks/native/ops/reshape_builder.cpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "reshape_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Reshape"; + +ReshapeBuilder::ReshapeBuilder() {} + +ReshapeBuilder::~ReshapeBuilder() {} + +OH_NN_ReturnCode ReshapeBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Reshape] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Reshape] Build failed, passed invalid input or output index of Reshape operation index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Reshape] Build failed, the Reshape expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ReshapeBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Reshape] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Reshape_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ReshapeBuilder, OH_NN_OPS_RESHAPE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/reshape_builder.h b/frameworks/native/ops/reshape_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..ef24166e923ed3b7c07b5911bdd548e50ea9b84d --- /dev/null +++ b/frameworks/native/ops/reshape_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_RESHAPE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_RESHAPE_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ReshapeBuilder : public OpsBuilder { +public: + ReshapeBuilder(); + ~ReshapeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_RESHAPE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/resize_bilinear_builder.cpp b/frameworks/native/ops/resize_bilinear_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5c868f2fa4c0bd5fc7751a86bb0c28e2e5c57531 --- /dev/null +++ b/frameworks/native/ops/resize_bilinear_builder.cpp @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "resize_bilinear_builder.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "ResizeBilinear"; + +ResizeBilinearBuilder::ResizeBilinearBuilder() {} + +ResizeBilinearBuilder::~ResizeBilinearBuilder() {} + +OH_NN_ReturnCode ResizeBilinearBuilder::SetNewHeight(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ResizeBilinear] SetNewHeight failed, the new_height dimensions should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[ResizeBilinear] SetNewHeight failed, the new_height should be type OH_NN_INT64"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ResizeBilinear] ResizeBilinear failed, the new_height passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_newHeight = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ResizeBilinearBuilder::SetNewWidth(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ResizeBilinear] SetNewWidth failed, the new_width dimensions should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[ResizeBilinear] SetNewWidth failed, the new_width should be type OH_NN_INT64"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ResizeBilinear] SetNewWidth failed, the new_width passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_newWidth = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode 
ResizeBilinearBuilder::SetPreserveAspectRatio(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ResizeBilinear] SetPreserveAspectRatio failed, the preserve_aspect_ratio dimensions should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ResizeBilinear] SetPreserveAspectRatio failed, the preserve_aspect_ratio should be type OH_NN_BOOL"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ResizeBilinear] SetPreserveAspectRatio failed, the preserve_aspect_ratio passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_preserveAspectRatio = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ResizeBilinearBuilder::SetCoordinateTransformMode(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ResizeBilinear] SetCoordinateTransformMode failed," + "the coordinate_transform_mode dimensions should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[ResizeBilinear] SetCoordinateTransformMode failed," + "the coordinate_transform_mode should be type OH_NN_INT32"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ResizeBilinear] SetCoordinateTransformMode failed," + "the coordinate_transform_mode passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_coordinateTransformMode = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ResizeBilinearBuilder::SetExcludeOutside(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ResizeBilinear] SetExcludeOutside failed, the exclude_outside dimensions should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[ResizeBilinear] SetExcludeOutside failed, the exclude_outside should be type OH_NN_INT64"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ResizeBilinear] SetExcludeOutside failed, the exclude_outside passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_excludeOutside = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ResizeBilinearBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ResizeBilinear] Build failed, the Resize operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ResizeBilinear] Build failed, passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (uint32_t i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_RESIZE_BILINEAR_NEW_HEIGHT: + returnCode = SetNewHeight(tensor); + break; + case OH_NN_RESIZE_BILINEAR_NEW_WIDTH: + returnCode = SetNewWidth(tensor); + break; + case OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO: + returnCode = SetPreserveAspectRatio(tensor); + break; + case OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE: + 
returnCode = SetCoordinateTransformMode(tensor); + break; + case OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE: + returnCode = SetExcludeOutside(tensor); + break; + default: + LOGE("[ResizeBilinear] Build failed, parameter type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ResizeBilinear] Build failed, passed invalid param."); + return returnCode; + } + } + + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ResizeBilinearBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ResizeBilinear] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + float cubicCoeff{0.0f}; + float extrapolationValue{0.0f}; + mindspore::lite::NearestMode nearestMode{mindspore::lite::NEAREST_MODE_NORMAL}; + + void* primitive = mindspore::lite::MindIR_Resize_CreatePrimitive(m_method, m_newHeight, m_newWidth, + m_preserveAspectRatio, m_coordinateTransformMode, cubicCoeff, m_excludeOutside, + extrapolationValue, nearestMode); + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ResizeBilinearBuilder, OH_NN_OPS_RESIZE_BILINEAR); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/resize_bilinear_builder.h b/frameworks/native/ops/resize_bilinear_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..ce8eb56945512f7e57bd2415212189d66a332860 --- /dev/null +++ b/frameworks/native/ops/resize_bilinear_builder.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_RESIZE_BILINEAR_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_RESIZE_BILINEAR_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ResizeBilinearBuilder : public OpsBuilder { +public: + ResizeBilinearBuilder(); + ~ResizeBilinearBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetNewHeight(std::shared_ptr tensor); + OH_NN_ReturnCode SetNewWidth(std::shared_ptr tensor); + OH_NN_ReturnCode SetPreserveAspectRatio(std::shared_ptr tensor); + OH_NN_ReturnCode SetCoordinateTransformMode(std::shared_ptr tensor); + OH_NN_ReturnCode SetExcludeOutside(std::shared_ptr tensor); + +private: + mindspore::lite::ResizeMethod m_method {mindspore::lite::RESIZE_METHOD_LINEAR}; + uint64_t m_newHeight{0}; + uint64_t m_newWidth{0}; + bool m_preserveAspectRatio{false}; + mindspore::lite::CoordinateTransformMode m_coordinateTransformMode { + mindspore::lite::COORDINATE_TRANSFORM_MODE_ASYMMETRIC}; + uint64_t m_excludeOutside{0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_RESIZE_BILINEAR_BUILDER_H diff --git a/frameworks/native/ops/rsqrt_builder.cpp b/frameworks/native/ops/rsqrt_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b38400571ca511995ae4634995e3ba11c9647725 --- /dev/null +++ b/frameworks/native/ops/rsqrt_builder.cpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "rsqrt_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Rsqrt"; + +RsqrtBuilder::RsqrtBuilder() {} + +RsqrtBuilder::~RsqrtBuilder() {} + +OH_NN_ReturnCode RsqrtBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Rsqrt] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Rsqrt] Build failed, passed invalid input or output index of Rsqrt operation index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Rsqrt] Build failed, the Rsqrt expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr RsqrtBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Rsqrt] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Rsqrt_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(RsqrtBuilder, OH_NN_OPS_RSQRT); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/rsqrt_builder.h b/frameworks/native/ops/rsqrt_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..9d1f6c0e2b44d15fb9cf8846726f505db3b73784 --- /dev/null +++ b/frameworks/native/ops/rsqrt_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_RSQRT_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_RSQRT_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class RsqrtBuilder : public OpsBuilder { +public: + RsqrtBuilder(); + ~RsqrtBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_RSQRT_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/scale_builder.cpp b/frameworks/native/ops/scale_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..defccf7bc2979d01f38e94674c8907d20e96575a --- /dev/null +++ b/frameworks/native/ops/scale_builder.cpp @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "scale_builder.h" + +#include "frameworks/native/ops_registry.h" +#include "frameworks/native/validation.h" +#include "frameworks/native/transform.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 3; +static const int OUTPUT_NUM = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "Scale"; + +ScaleBuilder::ScaleBuilder() {} + +ScaleBuilder::~ScaleBuilder() {} + +OH_NN_ReturnCode ScaleBuilder::SetAxis(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[ScaleBuilder] SetAxis failed, the axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ScaleBuilder] SetAxis failed, the axis dimensions should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ScaleBuilder] SetAxis failed, the axis passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_axis = static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ScaleBuilder::SetActivationType(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[ScaleBuilder] SetActivationType failed, the activation should be type OH_NN_INT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ScaleBuilder] SetActivationType failed, the activation dimensions should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ScaleBuilder] SetActivationType failed, the activation passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + const int8_t* fuseData = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast(*fuseData))) { + 
LOGE("[ScaleBuilder] SetActivationType failed, the activation input is invalid."); + return OH_NN_INVALID_PARAMETER; + } + + auto fuseType = (OH_NN_FuseType)(*fuseData); + m_activationType = NNToMS::TransfromFusionType(fuseType); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ScaleBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ScaleBuilder] Build failed, the scale operation has been built, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ScaleBuilder] Build failed, passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (uint32_t i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_SCALE_AXIS: + returnCode = SetAxis(tensor); + break; + case OH_NN_SCALE_ACTIVATIONTYPE: + returnCode = SetActivationType(tensor); + break; + default: + LOGE("[ScaleBuilder] Build failed, parameter type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ScaleBuilder] Build failed, passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determines that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ScaleBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ScaleBuilder] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_ScaleFusion_CreatePrimitive(*m_axis, m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ScaleBuilder, OH_NN_OPS_SCALE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/scale_builder.h b/frameworks/native/ops/scale_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..362f4ab14cd4be5a0c64a8b680017a36e134cbb3 --- /dev/null +++ b/frameworks/native/ops/scale_builder.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_SCALE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SCALE_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ScaleBuilder : public OpsBuilder { +public: + ScaleBuilder(); + ~ScaleBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + +private: + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + const uint64_t* m_axis{nullptr}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SCALE_BUILDER_H diff --git a/frameworks/native/ops/shape_builder.cpp b/frameworks/native/ops/shape_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9164b0e3f55f198b8e77765155709f96dd94db0d --- /dev/null +++ b/frameworks/native/ops/shape_builder.cpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "shape_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Shape"; + +ShapeBuilder::ShapeBuilder() {} + +ShapeBuilder::~ShapeBuilder() {} + +OH_NN_ReturnCode ShapeBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ShapeBuilder] Build failed, the Shape operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ShapeBuilder] Build failed, passed invalid input or output index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[ShapeBuilder] Build failed, the Shape expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ShapeBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ShapeBuilder] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Shape_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ShapeBuilder, OH_NN_OPS_SHAPE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/shape_builder.h b/frameworks/native/ops/shape_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..bd3d7989e1e461f0a37f23a7da8f03922bd9ff8c --- /dev/null +++ b/frameworks/native/ops/shape_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_SHAPE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SHAPE_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ShapeBuilder : public OpsBuilder { +public: + ShapeBuilder(); + ~ShapeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SHAPE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/sigmoid_builder.cpp b/frameworks/native/ops/sigmoid_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ae7df183cb5611cfa7267c4a18eccf5ca4249841 --- /dev/null +++ b/frameworks/native/ops/sigmoid_builder.cpp @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "sigmoid_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Sigmoid"; + +SigmoidBuilder::SigmoidBuilder() {} + +SigmoidBuilder::~SigmoidBuilder() {} + +OH_NN_ReturnCode SigmoidBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[SigmoidBuilder] Build failed, the Sigmoid operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SigmoidBuilder] Build failed, passed invalid input or output index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[SigmoidBuilder] Build failed, the Sigmoid expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determinies that of the operator. 
+ SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr SigmoidBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SigmoidBuilder] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + float alpha{0.0f}; + float minVal{0.0f}; + float maxVal{0.0f}; + bool approximate{false}; + mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_SIGMOID}; + + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, minVal, + maxVal, approximate); + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SigmoidBuilder, OH_NN_OPS_SIGMOID); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/sigmoid_builder.h b/frameworks/native/ops/sigmoid_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..63d9c8eb89db33dd10c016a42f4c43d00c3daa4b --- /dev/null +++ b/frameworks/native/ops/sigmoid_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_SIGMOID_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SIGMOID_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SigmoidBuilder : public OpsBuilder { +public: + SigmoidBuilder(); + ~SigmoidBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SIGMOID_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/slice_builder.cpp b/frameworks/native/ops/slice_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6351cc807a9bfc029f58e64c044ce27dcb2034f6 --- /dev/null +++ b/frameworks/native/ops/slice_builder.cpp @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "slice_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 3; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Slice"; + +SliceBuilder::SliceBuilder() {} + +SliceBuilder::~SliceBuilder() {} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. + */ +OH_NN_ReturnCode SliceBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[SliceBuilder] Slice operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SliceBuilder] Passed invalid input or output index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGE("[SliceBuilder] slice expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr SliceBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SliceBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_SliceFusion_CreatePrimitive(m_axes); + if (primitive == nullptr) { + LOGE("[SliceBuilder] MindIR_SliceFusion_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SliceBuilder, OH_NN_OPS_SLICE); +} // namespace ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/slice_builder.h b/frameworks/native/ops/slice_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..3494d6efd1b5278d21fd4a5a667695aa75b65d33 --- /dev/null +++ b/frameworks/native/ops/slice_builder.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_SLICE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SLICE_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SliceBuilder : public OpsBuilder { +public: + SliceBuilder(); + ~SliceBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphTensorPtr GetPrimitive() override; + +private: + std::vector m_axes; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SLICE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/softmax_builder.cpp b/frameworks/native/ops/softmax_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e0835bf71dfa84995bdf065d2d95b7a300bebc71 --- /dev/null +++ b/frameworks/native/ops/softmax_builder.cpp @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "softmax_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Softmax"; + +SoftmaxBuilder::SoftmaxBuilder() {} + +SoftmaxBuilder::~SoftmaxBuilder() {} + +OH_NN_ReturnCode SoftmaxBuilder::SetAxis(std::shared_ptr tensor) +{ + // Set Axis + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[SoftmaxBuilder] The 2nd input axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[SoftmaxBuilder] The 2nd input axis should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + m_axis.clear(); + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[SoftmaxBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_axis.emplace_back(*(static_cast(buffer))); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode SoftmaxBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[SoftmaxBuilder] Softmax operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SoftmaxBuilder] Passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_SOFTMAX_AXIS: + returnCode = SetAxis(tensor); + break; + default: + LOGE("[SoftmaxBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SoftmaxBuilder] Passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphTensorPtr SoftmaxBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SoftmaxBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_Softmax_CreatePrimitive(m_axis); + if (primitive == nullptr) { + LOGE("[SoftmaxBuilder] Create primitive of Softmax failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphTensorPtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SoftmaxBuilder, OH_NN_OPS_SOFTMAX); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/softmax_builder.h b/frameworks/native/ops/softmax_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..4a449b2113008231959bcf0eccf5855415481fb5 --- /dev/null +++ b/frameworks/native/ops/softmax_builder.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_SOFTMAX_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SOFTMAX_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SoftmaxBuilder : public OpsBuilder { +public: + SoftmaxBuilder(); + ~SoftmaxBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphTensorPtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + +private: + std::vector m_axis; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SOFTMAX_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/space_to_batch_nd_builder.cpp b/frameworks/native/ops/space_to_batch_nd_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..094c1681db12b12e87a10683d11cf9f92bd7557d --- /dev/null +++ b/frameworks/native/ops/space_to_batch_nd_builder.cpp @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "space_to_batch_nd_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "SpaceToBatchND"; +static const int PADDINGS_DATA_SIZE = 2; +static const int VECT_DATA_SIZE = 2; +static const int BLOCKSHAPE_RANK = 1; +static const int PADDINGS_RANK = 2; +static const int BLOCK_SIZE = 2; +static const int PADDINGS_SIZE = 4; + +SpaceToBatchNDBuilder::SpaceToBatchNDBuilder() {} + +SpaceToBatchNDBuilder::~SpaceToBatchNDBuilder() {} + +OH_NN_ReturnCode SpaceToBatchNDBuilder::SetBlockShape(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[SpaceToBatchNDBuilder] The 2nd input blockShape should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + auto blockshape_shape = tensor->GetDimensions(); + if (blockshape_shape.size() != BLOCKSHAPE_RANK) { + LOGE("[SpaceToBatchNDBuilder] Invalid rank of shape of 2nd input blockShape, should be 1 dimensions."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != BLOCK_SIZE) { + LOGE("[SpaceToBatchNDBuilder] The 2nd input blockShape size should be 2."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[SpaceToBatchNDBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + const int64_t* blockShapeData = reinterpret_cast(buffer); + const uint32_t elementSize = tensor->GetElementCount(); + for (uint32_t i = 0; i < elementSize; ++i) { + block_shape.push_back(blockShapeData[i]); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode SpaceToBatchNDBuilder::SetPaddings(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[SpaceToBatchNDBuilder] The 3rd input paddings should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + auto paddings_shape = tensor->GetDimensions(); + if (paddings_shape.size() != PADDINGS_RANK) { + LOGE("[SpaceToBatchNDBuilder] Invalid rank of shape of 3rd input paddings, should be 2 dimensions."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != PADDINGS_SIZE) { + LOGE("[SpaceToBatchNDBuilder] The 3rd input paddings size should be 4."); + return OH_NN_INVALID_PARAMETER; + } + + OH_NN_ReturnCode returnCode = SetPadData(tensor); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SpaceToBatchNDBuilder] SetPadData failed."); + return returnCode; + } + + return OH_NN_SUCCESS; +} +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. 
+ */ +OH_NN_ReturnCode SpaceToBatchNDBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[SpaceToBatchNDBuilder] SpaceToBatchND operation has been built, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SpaceToBatchNDBuilder] Passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE: + returnCode = SetBlockShape(tensor); + break; + case OH_NN_SPACE_TO_BATCH_ND_PADDINGS: + returnCode = SetPaddings(tensor); + break; + default: + LOGE("[SpaceToBatchNDBuilder] Parameter Type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SpaceToBatchNDBuilder] Passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determines that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode SpaceToBatchNDBuilder::SetPadData(std::shared_ptr tensor) +{ + paddings.clear(); + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[SpaceToBatchNDBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + const int64_t* paddingsData = reinterpret_cast(buffer); + for (int i = 0; i < PADDINGS_DATA_SIZE; i++) { + std::vector vect_data; + vect_data.reserve(VECT_DATA_SIZE); + for (int j = 0; j < VECT_DATA_SIZE; ++j) { + vect_data.push_back(paddingsData[i * VECT_DATA_SIZE + j]); + } + paddings.push_back(vect_data); + } + return OH_NN_SUCCESS; +} + +LiteGraphTensorPtr SpaceToBatchNDBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SpaceToBatchNDBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_SpaceToBatchND_CreatePrimitive(block_shape, paddings); + if (primitive == nullptr) { + LOGE("[SpaceToBatchNDBuilder] MindIR_SpaceToBatchND_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphTensorPtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SpaceToBatchNDBuilder, OH_NN_OPS_SPACE_TO_BATCH_ND); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/space_to_batch_nd_builder.h b/frameworks/native/ops/space_to_batch_nd_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..0944183c6247b3a73941178bc8bcbc1d29184a6e --- /dev/null +++ b/frameworks/native/ops/space_to_batch_nd_builder.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_SPACETOBATCHND_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SPACETOBATCHND_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SpaceToBatchNDBuilder : public OpsBuilder { +public: + SpaceToBatchNDBuilder(); + ~SpaceToBatchNDBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphTensorPtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetPadData(std::shared_ptr tensor); + OH_NN_ReturnCode SetBlockShape(std::shared_ptr tensor); + OH_NN_ReturnCode SetPaddings(std::shared_ptr tensor); + +private: + std::vector> paddings {}; + std::vector block_shape {}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SPACETOBATCHND_BUILDER_H diff --git a/frameworks/native/ops/split_builder.cpp b/frameworks/native/ops/split_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fbdd9903fc5cd8bc5b4548331708e9ba5ef2a48e --- /dev/null +++ b/frameworks/native/ops/split_builder.cpp @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "split_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const std::string OP_NAME = "Split"; + +SplitBuilder::SplitBuilder() {} + +SplitBuilder::~SplitBuilder() {} + +OH_NN_ReturnCode SplitBuilder::SetInputAndOutput(const std::vector &inputsIndex, + const std::vector &outputsIndex, const std::vector> &allTensors) +{ + auto inputSize = inputsIndex.size(); + if (inputSize != INPUT_NUM) { + LOGE("[SplitBuilder] The number of inputsIndex should be %d, its number is %zu.", INPUT_NUM, inputSize); + return OH_NN_INVALID_PARAMETER; + } + + auto allTensorSize = allTensors.size(); + for (auto index : inputsIndex) { + if (index >= allTensorSize) { + LOGE("[SplitBuilder] InputsIndex of Split is out of range."); + return OH_NN_INVALID_PARAMETER; + } + } + + for (auto index : outputsIndex) { + if (index >= allTensorSize) { + LOGE("[SplitBuilder] OutputsIndex of Split is out of range."); + return OH_NN_INVALID_PARAMETER; + } + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determinies that of the operator. 
+ SetQuantType(outputsIndex, allTensors); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode SplitBuilder::SetAxis(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[SplitBuilder] The 4th input axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[SplitBuilder] The 4th input axis should be a scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[SplitBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_axis = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode SplitBuilder::SetOutputNum(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[SplitBuilder] The 2nd input outputNum should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[SplitBuilder] The 2nd input outputNum should be a scalar."); + return OH_NN_INVALID_PARAMETER; + } + + m_output_num = *(static_cast(tensor->GetBuffer())); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode SplitBuilder::SetSizeSplits(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[SplitBuilder] The 3rd input sizeSplit should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + const int64_t *size_splits_data_ptr = reinterpret_cast(tensor->GetBuffer()); + for (uint32_t i = 0; i < tensor->GetElementCount(); i++) { + m_size_splits.push_back(*size_splits_data_ptr++); + } + + return OH_NN_SUCCESS; +} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. + */ +OH_NN_ReturnCode SplitBuilder::Build(const std::vector &paramsIndex, + const std::vector &inputsIndex, + const std::vector &outputsIndex, + const std::vector> &allTensors) +{ + if (m_isBuild) { + LOGE("[SplitBuilder] Split operation has been built, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = SetInputAndOutput(inputsIndex, outputsIndex, allTensors); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SplitBuilder] Set index of inputs or outputs failed."); + return returnCode; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_SPLIT_AXIS: + returnCode = SetAxis(tensor); + break; + case OH_NN_SPLIT_OUTPUT_NUM: + returnCode = SetOutputNum(tensor); + break; + case OH_NN_SPLIT_SIZE_SPLITS: + returnCode = SetSizeSplits(tensor); + break; + default: + LOGE("[SplitBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SplitBuilder] Passed invalid param."); + return returnCode; + } + } + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphTensorPtr SplitBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SplitBuilder] Cannot get primitive before call build."); + return { nullptr, DestroyLiteGraphPrimitive }; + } + + auto primitive = mindspore::lite::MindIR_Split_CreatePrimitive(m_output_num, m_size_splits, m_axis); + if (primitive == nullptr) { + LOGE("[SplitBuilder] MindIR_Split_CreatePrimitive failed."); + return { nullptr, DestroyLiteGraphPrimitive }; + } + + LiteGraphTensorPtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SplitBuilder, OH_NN_OPS_SPLIT); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/split_builder.h b/frameworks/native/ops/split_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..2ab4cf6b918b6172a260a5ee20d0aad692039c6a --- /dev/null +++ b/frameworks/native/ops/split_builder.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_SPLIT_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SPLIT_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SplitBuilder : public OpsBuilder { +public: + SplitBuilder(); + ~SplitBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphTensorPtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetInputAndOutput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors); + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetOutputNum(std::shared_ptr tensor); + OH_NN_ReturnCode SetSizeSplits(std::shared_ptr tensor); + +private: + int64_t m_output_num {0}; + std::vector m_size_splits; + int64_t m_axis {0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SPLIT_BUILDER_H diff --git a/frameworks/native/ops/sqrt_builder.cpp b/frameworks/native/ops/sqrt_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..378902ff85bb7c89b0c584d3896bd1ea04cf1b66 --- /dev/null +++ b/frameworks/native/ops/sqrt_builder.cpp @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "sqrt_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Sqrt"; + +SqrtBuilder::SqrtBuilder() {} + +SqrtBuilder::~SqrtBuilder() {} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. + */ +OH_NN_ReturnCode SqrtBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[SqrtBuilder] Sqrt operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SqrtBuilder] Passed invalid input or output index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGE("[SqrtBuilder] sqrt expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphTensorPtr SqrtBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SqrtBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_Sqrt_CreatePrimitive(); + if (primitive == nullptr) { + LOGE("[SqrtBuilder] Create primitive of Sqrt failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphTensorPtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SqrtBuilder, OH_NN_OPS_SQRT); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/sqrt_builder.h b/frameworks/native/ops/sqrt_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..ad835b8c3378bd33966fbb56e49d2c65fdae8534 --- /dev/null +++ b/frameworks/native/ops/sqrt_builder.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_SQRT_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SQRT_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SqrtBuilder : public OpsBuilder { +public: + SqrtBuilder(); + ~SqrtBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphTensorPtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SQRT_BUILDER_H diff --git a/frameworks/native/ops/squared_difference_builder.cpp b/frameworks/native/ops/squared_difference_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..331b43eca5a9885d07b81e86e71122ee62602e9c --- /dev/null +++ b/frameworks/native/ops/squared_difference_builder.cpp @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "squared_difference_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "SquaredDifference"; + +SquaredDifferenceBuilder::SquaredDifferenceBuilder() {} + +SquaredDifferenceBuilder::~SquaredDifferenceBuilder() {} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. 
+ */ +OH_NN_ReturnCode SquaredDifferenceBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[SquaredDifferenceBuilder] SquaredDifference operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SquaredDifferenceBuilder] Passed invalid input or output index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGE("[SquaredDifferenceBuilder] squaredDifference expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphTensorPtr SquaredDifferenceBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SquaredDifferenceBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_SquaredDifference_CreatePrimitive(); + if (primitive == nullptr) { + LOGE("[SquaredDifferenceBuilder] MindIR_SquaredDifference_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphTensorPtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SquaredDifferenceBuilder, OH_NN_OPS_SQUARED_DIFFERENCE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/squared_difference_builder.h b/frameworks/native/ops/squared_difference_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..d51847c4db7f4f3df91971065f297f9db3831e40 --- /dev/null +++ b/frameworks/native/ops/squared_difference_builder.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_SQUAREDDIFFERENCE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SQUAREDDIFFERENCE_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SquaredDifferenceBuilder : public OpsBuilder { +public: + SquaredDifferenceBuilder(); + ~SquaredDifferenceBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphTensorPtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SQUAREDDIFFERENCE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/squeeze_builder.cpp b/frameworks/native/ops/squeeze_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c37da63524f0347b09c909fe72139c432a5a3430 --- /dev/null +++ b/frameworks/native/ops/squeeze_builder.cpp @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "squeeze_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Squeeze"; + +SqueezeBuilder::SqueezeBuilder() {} + +SqueezeBuilder::~SqueezeBuilder() {} + +OH_NN_ReturnCode SqueezeBuilder::SetAxis(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[SqueezeBuilder] The 2nd input axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[SqueezeBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + int64_t *axis_data_ptr = static_cast(buffer); + const uint32_t elementSize = tensor->GetElementCount(); + for (uint32_t i = 0; i < elementSize; ++i) { + m_axis.push_back(*axis_data_ptr); + ++axis_data_ptr; + } + + return OH_NN_SUCCESS; +} + +/* * + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. 
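+ *
+ * Editorial sketch (not part of this patch) of the parameter this Build expects: the
+ * OH_NN_SQUEEZE_AXIS tensor must hold OH_NN_INT64 data, and SetAxis() copies every
+ * element of its buffer into m_axis, so several dimensions can be squeezed at once.
+ *
+ *     int64_t axisValues[] = {0, 2};   // illustrative: squeeze dimensions 0 and 2
+ *     // The NNTensor wrapping this buffer must report GetDataType() == OH_NN_INT64
+ *     // and GetElementCount() == 2; Build() routes it to SetAxis() through the
+ *     // OH_NN_SQUEEZE_AXIS case of the parameter switch below.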
+ */ +OH_NN_ReturnCode SqueezeBuilder::Build(const std::vector ¶msIndex, + const std::vector &inputsIndex, + const std::vector &outputsIndex, + const std::vector> &allTensors) +{ + if (m_isBuild) { + LOGE("[SqueezeBuilder] Squeeze operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SqueezeBuilder] Passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_SQUEEZE_AXIS: + returnCode = SetAxis(tensor); + break; + default: + LOGE("[SqueezeBuilder] Parameter Type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SqueezeBuilder] Passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphTensorPtr SqueezeBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SqueezeBuilder] Cannot get primitive before call build."); + return { nullptr, DestroyLiteGraphPrimitive }; + } + + auto primitive = mindspore::lite::MindIR_Squeeze_CreatePrimitive(m_axis); + if (primitive == nullptr) { + LOGE("[SqueezeBuilder] MindIR_Squeeze_CreatePrimitive failed."); + return { nullptr, DestroyLiteGraphPrimitive }; + } + + LiteGraphTensorPtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SqueezeBuilder, OH_NN_OPS_SQUEEZE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/squeeze_builder.h b/frameworks/native/ops/squeeze_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..989caa1c270488723a2f94b11b70b8f41dafda15 --- /dev/null +++ b/frameworks/native/ops/squeeze_builder.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_SQUEEZE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SQUEEZE_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SqueezeBuilder : public OpsBuilder { +public: + SqueezeBuilder(); + ~SqueezeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphTensorPtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + +private: + std::vector m_axis; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SQUEEZE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/stack_builder.cpp b/frameworks/native/ops/stack_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..efe7a5cf99a018afa9e7551fcb83d566dbcf0ed3 --- /dev/null +++ b/frameworks/native/ops/stack_builder.cpp @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "stack_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_MIN_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Stack"; + +StackBuilder::StackBuilder() {} + +StackBuilder::~StackBuilder() {} + +OH_NN_ReturnCode StackBuilder::SetAxis(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[StackBuilder] The last input axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[StackBuilder] The last input axis should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[StackBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_axis = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. 
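+ *
+ * Editorial sketch (not part of this patch) of the index layout for stacking three
+ * tensors along axis 1; the concrete index values are assumptions:
+ *
+ *     std::vector<uint32_t> inputsIndex {0, 1, 2};   // two or more inputs (INPUT_MIN_NUM == 2)
+ *     std::vector<uint32_t> outputsIndex {3};        // exactly one output (OUTPUT_NUM == 1)
+ *     std::vector<uint32_t> paramsIndex {4};         // OH_NN_STACK_AXIS: a single int64_t, here 1
+ *
+ * SetAxis() rejects the axis tensor unless it is OH_NN_INT64 with exactly one element.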
+ */ +OH_NN_ReturnCode StackBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[StackBuilder] Stack operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (inputsIndex.size() < INPUT_MIN_NUM) { + LOGE("[StackBuilder] The number of index of inputs don't larger than %d.", INPUT_MIN_NUM); + return OH_NN_INVALID_PARAMETER; + } + if (outputsIndex.size() != OUTPUT_NUM) { + LOGE("[StackBuilder] The number of index of outputs don't equal to %d.", OUTPUT_NUM); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + OH_NN_ReturnCode returnCode; + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_STACK_AXIS: + returnCode = SetAxis(tensor); + break; + default: + LOGE("[StackBuilder] Parameter Type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[StackBuilder] Passed invalid param."); + return returnCode; + } + } + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphTensorPtr StackBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[StackBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_Stack_CreatePrimitive(m_axis); + if (primitive == nullptr) { + LOGE("[StackBuilder] MindIR_Stack_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphTensorPtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(StackBuilder, OH_NN_OPS_STACK); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/stack_builder.h b/frameworks/native/ops/stack_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..1e80ecd212b45616c7f9f9fd49c5a4f1d4b7c1c9 --- /dev/null +++ b/frameworks/native/ops/stack_builder.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_STACK_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_STACK_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class StackBuilder : public OpsBuilder { +public: + StackBuilder(); + ~StackBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphTensorPtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + +private: + int64_t m_axis = {0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_STACK_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/strided_slice_builder.cpp b/frameworks/native/ops/strided_slice_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4f25d85db795cc4fa7602ac9ecd7bbe128b8261e --- /dev/null +++ b/frameworks/native/ops/strided_slice_builder.cpp @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "strided_slice_builder.h" + +#include "mindir.h" + +#include "interfaces/kits/c/neural_network_runtime_type.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 4; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "StridedSlice"; + +StridedSliceBuilder::StridedSliceBuilder() {} + +StridedSliceBuilder::~StridedSliceBuilder() {} + +OH_NN_ReturnCode StridedSliceBuilder::SetInputOutput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[StridedSliceBuilder] Passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode StridedSliceBuilder::SetBeginMask(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[StridedSliceBuilder] The 5th input beginMask should be type HNN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[StridedSliceBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_begin_mask = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode StridedSliceBuilder::SetEndMask(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[StridedSliceBuilder] The 6th input endMask should be type HNN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[StridedSliceBuilder] Tensor buffer is nullptr."); + return 
OH_NN_INVALID_PARAMETER; + } + m_end_mask = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode StridedSliceBuilder::SetEllipsisMask(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[StridedSliceBuilder] The 7th input ellipsisMask should be type HNN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[StridedSliceBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_ellipsis_mask = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode StridedSliceBuilder::SetNewAxisMask(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[StridedSliceBuilder] The 8th input newAxisMask should be type HNN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[StridedSliceBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_new_axis_mask = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode StridedSliceBuilder::SetShrinkAxisMask(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[StridedSliceBuilder] The 9th input shrinkAxisMAsk should be type HNN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[StridedSliceBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_shrink_axis_mask = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. + */ +OH_NN_ReturnCode StridedSliceBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[StridedSliceBuilder] StridedSlice operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = SetInputOutput(inputsIndex, outputsIndex, allTensors); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[StridedSliceBuilder] Set index of inputs or outputs failed."); + return returnCode; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_STRIDED_SLICE_BEGIN_MASK: + returnCode = SetBeginMask(tensor); + break; + case OH_NN_STRIDED_SLICE_END_MASK: + returnCode = SetEndMask(tensor); + break; + case OH_NN_STRIDED_SLICE_ELLIPSIS_MASK: + returnCode = SetEllipsisMask(tensor); + break; + case OH_NN_STRIDED_SLICE_NEW_AXIS_MASK: + returnCode = SetNewAxisMask(tensor); + break; + case OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK: + returnCode = SetShrinkAxisMask(tensor); + break; + default: + LOGE("[StridedSliceBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[StridedSliceBuilder] Passed invalid param."); + return returnCode; + } + } + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr StridedSliceBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[StridedSliceBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_StridedSlice_CreatePrimitive(m_begin_mask, m_end_mask, m_ellipsis_mask, + m_new_axis_mask, m_shrink_axis_mask); + if (primitive == nullptr) { + LOGE("[StridedSliceBuilder] MindIR_StridedSlice_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(StridedSliceBuilder, OH_NN_OPS_STRIDED_SLICE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/strided_slice_builder.h b/frameworks/native/ops/strided_slice_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..45637b05e47dfb16904d9cab0e688ffe29414ce9 --- /dev/null +++ b/frameworks/native/ops/strided_slice_builder.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_STRIDEDSLICE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_STRIDEDSLICE_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class StridedSliceBuilder : public OpsBuilder { +public: + StridedSliceBuilder(); + ~StridedSliceBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetInputOutput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors); + OH_NN_ReturnCode SetBeginMask(std::shared_ptr tensor); + OH_NN_ReturnCode SetEndMask(std::shared_ptr tensor); + OH_NN_ReturnCode SetEllipsisMask(std::shared_ptr tensor); + OH_NN_ReturnCode SetNewAxisMask(std::shared_ptr tensor); + OH_NN_ReturnCode SetShrinkAxisMask(std::shared_ptr tensor); + +private: + int64_t m_begin_mask = {0}; + int64_t m_end_mask = {0}; + int64_t m_ellipsis_mask = {0}; + int64_t m_new_axis_mask = {0}; + int64_t m_shrink_axis_mask = {0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_STRIDEDSLICE_BUILDER_H diff --git a/frameworks/native/ops/sub_builder.cpp b/frameworks/native/ops/sub_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6021c17531d94fd0fe4188928ae44cd56ccd2b9f --- /dev/null +++ b/frameworks/native/ops/sub_builder.cpp @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "sub_builder.h" +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Sub"; + +SubBuilder::SubBuilder() {} + +SubBuilder::~SubBuilder() {} + +OH_NN_ReturnCode SubBuilder::SetActivationType(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[SubBuilder] The 3rd input activation should be type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[SubBuilder] The 3rd input activation should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[SubBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + int8_t* fuseData = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast(*fuseData))) { + LOGE("[SubBuilder] Fuse activation type is invalid"); + return OH_NN_INVALID_PARAMETER; + } + + auto fuseType = (OH_NN_FuseType)(*fuseData); + m_activationType = NNToMS::TransfromFusionType(fuseType); + return OH_NN_SUCCESS; +} + +/** + * Build method. 
+ * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. + */ +OH_NN_ReturnCode SubBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[SubBuilder] Sub operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SubBuilder] Passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_SUB_ACTIVATIONTYPE: + returnCode = SetActivationType(tensor); + break; + default: + LOGE("[SubBuilder] Parameter Type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SubBuilder] Passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = "Sub"; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr SubBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SubBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_SubFusion_CreatePrimitive(m_activationType); + if (primitive == nullptr) { + LOGE("[SubBuilder] MindIR_SubFusion_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SubBuilder, OH_NN_OPS_SUB); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/sub_builder.h b/frameworks/native/ops/sub_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..19dc8d23ee0e77896b25160240e149ef29d68c92 --- /dev/null +++ b/frameworks/native/ops/sub_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_SUB_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SUB_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SubBuilder : public OpsBuilder { +public: + SubBuilder(); + ~SubBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + +private: + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SUB_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/tanh_builder.cpp b/frameworks/native/ops/tanh_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c275f004d069484853ec7537e230fa5554bcaf51 --- /dev/null +++ b/frameworks/native/ops/tanh_builder.cpp @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "tanh_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Tanh"; + +TanhBuilder::TanhBuilder() {} + +TanhBuilder::~TanhBuilder() {} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. + */ +OH_NN_ReturnCode TanhBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[TanhBuilder] Tanh operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[TanhBuilder] Passed invalid input or output index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGE("[TanhBuilder] TanhBuilder expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determinies that of the operator. 
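+    // Implementation note: OpsBuilder::SetQuantType (frameworks/native/ops_builder.cpp) marks the
+    // operator as QUANT_ALL when the first output tensor carries quantization parameters;
+    // otherwise the QUANT_NONE default set in the base class is kept.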
+ SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr TanhBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[TanhBuilder] Cannot get primitive before call build."); + return { nullptr, DestroyLiteGraphPrimitive }; + } + + float alpha {0.0f}; + float minVal {0.0f}; + float maxVal {0.0f}; + bool approximate {false}; + auto primitive = + mindspore::lite::MindIR_Activation_CreatePrimitive(m_activationType, alpha, minVal, maxVal, approximate); + if (primitive == nullptr) { + LOGE("[TanhBuilder] Create primitive of Tanh failed."); + return { nullptr, DestroyLiteGraphPrimitive }; + } + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(TanhBuilder, OH_NN_OPS_TANH); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/tanh_builder.h b/frameworks/native/ops/tanh_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..32fd2a7cec85872d2d157f9ad3bd02acff1a0fe2 --- /dev/null +++ b/frameworks/native/ops/tanh_builder.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_TANH_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_TANH_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class TanhBuilder : public OpsBuilder { +public: + TanhBuilder(); + ~TanhBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_TANH}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_TANH_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/tile_builder.cpp b/frameworks/native/ops/tile_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8032acc505d40ba9f674704d2170e7aacbd491e9 --- /dev/null +++ b/frameworks/native/ops/tile_builder.cpp @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "tile_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Tile"; + +TileBuilder::TileBuilder() {} + +TileBuilder::~TileBuilder() {} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. + */ +OH_NN_ReturnCode TileBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[TileBuilder] Tile operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[TileBuilder] Passed invalid input or output index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGE("[TileBuilder] TransposeBuilder expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr TileBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[TileBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_TileFusion_CreatePrimitive(m_dims); + if (primitive == nullptr) { + LOGE("[TileBuilder] MindIR_TileFusion_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(TileBuilder, OH_NN_OPS_TILE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/tile_builder.h b/frameworks/native/ops/tile_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..fc9321251af8606b6da49688b9af77b102fd32aa --- /dev/null +++ b/frameworks/native/ops/tile_builder.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_TILE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_TILE_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class TileBuilder : public OpsBuilder { +public: + TileBuilder(); + ~TileBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + std::vector m_dims {0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_TILE_BUILDER_H diff --git a/frameworks/native/ops/top_k_builder.cpp b/frameworks/native/ops/top_k_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..673d6f0baff2497072cb8aaf872573be16b953c6 --- /dev/null +++ b/frameworks/native/ops/top_k_builder.cpp @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "top_k_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const std::string OP_NAME = "TopK"; +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 2; + +TopKBuilder::TopKBuilder() {} + +TopKBuilder::~TopKBuilder() {} + +OH_NN_ReturnCode TopKBuilder::SetSorted(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[TopK] The sorted should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[TopK] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_sorted = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +/** + * Build method. + * 1.build primitive of ops. + * 2.build inputIndex of ops. + * 3.build outputIndex of ops. + */ +OH_NN_ReturnCode TopKBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[TopK] Build operation has been completed, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[TopK] Passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_TOP_K_SORTED: + returnCode = SetSorted(tensor); + break; + default: + LOGE("[TopK] Parameter Type is invalid. 
type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[TopK] Passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr TopKBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[TopK] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + int64_t axis = 0; + auto primitive = mindspore::lite::MindIR_TopKFusion_CreatePrimitive(m_sorted, axis); + if (primitive == nullptr) { + LOGE("[TopK] MindIR_TopKFusion_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(TopKBuilder, OH_NN_OPS_TOP_K); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/top_k_builder.h b/frameworks/native/ops/top_k_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..dfd4a6a78487ca8b2e0ef8fc02a536505e8617bf --- /dev/null +++ b/frameworks/native/ops/top_k_builder.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_TOPK_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_TOPK_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class TopKBuilder : public OpsBuilder { +public: + TopKBuilder(); + ~TopKBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetSorted(std::shared_ptr tensor); + +private: + bool m_sorted; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_TOPK_BUILDER_H diff --git a/frameworks/native/ops/transpose_builder.cpp b/frameworks/native/ops/transpose_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a89b28207a93531d572c65441ba6dd28eef8cedd --- /dev/null +++ b/frameworks/native/ops/transpose_builder.cpp @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "transpose_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Transpose"; + +TransposeBuilder::TransposeBuilder() {} + +TransposeBuilder::~TransposeBuilder() {} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. + */ +OH_NN_ReturnCode TransposeBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[TransposeBuilder] Transpose operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[TransposeBuilder] Passed invalid input or output index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGE("[TransposeBuilder] TransposeBuilder expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr TransposeBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[TransposeBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_Transpose_CreatePrimitive(); + if (primitive == nullptr) { + LOGE("[TransposeBuilder] MindIR_Transpose_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(TransposeBuilder, OH_NN_OPS_TRANSPOSE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/transpose_builder.h b/frameworks/native/ops/transpose_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..998d5803a0c7771dabf02ad0da88340f92e3197c --- /dev/null +++ b/frameworks/native/ops/transpose_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_TRANSPOSE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_TRANSPOSE_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class TransposeBuilder : public OpsBuilder { +public: + TransposeBuilder(); + ~TransposeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_TRANSPOSE_BUILDER_H diff --git a/frameworks/native/ops/unsqueeze_builder.cpp b/frameworks/native/ops/unsqueeze_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..62b6d86b531901cb301f92ef6f1fac914118e8fa --- /dev/null +++ b/frameworks/native/ops/unsqueeze_builder.cpp @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "unsqueeze_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Unsqueeze"; + +UnsqueezeBuilder::UnsqueezeBuilder() {} + +UnsqueezeBuilder::~UnsqueezeBuilder() {} + +OH_NN_ReturnCode UnsqueezeBuilder::SetAxis(std::shared_ptr tensor) +{ + // Set Axis + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[UnsqueezeBuilder] The 2nd input axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[UnsqueezeBuilder] The 2nd input axis should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + m_axis.clear(); + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[UnsqueezeBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_axis.emplace_back(*(static_cast(buffer))); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode UnsqueezeBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[UnsqueezeBuilder] Unsqueeze build operation has been build, cannot build again"); + return OH_NN_OPERATION_FORBIDDEN; + } + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[UnsqueezeBuilder] Passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_UNSQUEEZE_AXIS: + returnCode = SetAxis(tensor); + break; + default: + LOGE("[UnsqueezeBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[UnsqueezeBuilder] Passed invalid param."); + return returnCode; + } + } + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr UnsqueezeBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[UnsqueezeBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_Unsqueeze_CreatePrimitive(m_axis); + if (primitive == nullptr) { + LOGE("[UnsqueezeBuilder] MindIR_Unsqueeze_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(UnsqueezeBuilder, OH_NN_OPS_UNSQUEEZE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/unsqueeze_builder.h b/frameworks/native/ops/unsqueeze_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..663395740d3ee93d80bde99e01b6bd9d3fe41081 --- /dev/null +++ b/frameworks/native/ops/unsqueeze_builder.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_UNSQUEEZE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_UNSQUEEZE_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class UnsqueezeBuilder : public OpsBuilder { +public: + UnsqueezeBuilder(); + ~UnsqueezeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + +private: + std::vector m_axis; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_UNSQUEEZE_BUILDER_H diff --git a/frameworks/native/ops_builder.cpp b/frameworks/native/ops_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d815fc99c8492e4cfd8a7332481da5c7ebfd805b --- /dev/null +++ b/frameworks/native/ops_builder.cpp @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops_builder.h" +#include "mindir.h" +#include "mindir_types.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +void DestroyLiteGraphPrimitive(void* primitive) +{ + mindspore::lite::MindIR_Primitive_Destroy(&primitive); +} + +void OpsBuilder::GetInputIndex(std::vector& inputsIndex, + const std::unordered_map& modelIDToGraphID) const +{ + for (auto index : m_inputsIndex) { + // index has been prevented from taking value out of modelIDToGraphID, no need to check. + inputsIndex.emplace_back(modelIDToGraphID.at(index)); + } +} + +void OpsBuilder::GetOutputIndex(std::vector& outputsIndex, + const std::unordered_map& modelIDToGraphID) const +{ + for (auto index : m_outputsIndex) { + // index has been prevented from taking value out of modelIDToGraphID, no need to check. + outputsIndex.emplace_back(modelIDToGraphID.at(index)); + } +} + +std::string OpsBuilder::GetName() const +{ + return m_name; +} + +OpsQuantType OpsBuilder::GetQuantType() const +{ + return m_quantType; +} + +OH_NN_ReturnCode OpsBuilder::CheckIOIndex(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors, + const size_t inputNum, + const size_t outputNum) const +{ + size_t inputsIndexSize = inputsIndex.size(); + size_t outputIndexSize = outputsIndex.size(); + if (inputsIndexSize != inputNum) { + LOGE("The number of index of inputs is %zu don't equal to %zu.", inputsIndexSize, inputNum); + return OH_NN_INVALID_PARAMETER; + } + if (outputIndexSize != outputNum) { + LOGE("The number of index of outputs is %zu don't equal to %zu.", outputIndexSize, outputNum); + return OH_NN_INVALID_PARAMETER; + } + + for (auto index : inputsIndex) { + if (index >= allTensors.size()) { + LOGE("The index of inputs is out of range."); + return OH_NN_INVALID_PARAMETER; + } + } + + for (auto index : outputsIndex) { + if (index >= allTensors.size()) { + LOGE("The index of outputs is out of range."); + return OH_NN_INVALID_PARAMETER; + } + } + + return OH_NN_SUCCESS; +} + +void OpsBuilder::SetQuantType(const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (allTensors[outputsIndex.front()]->IsQuantTensor()) { + m_quantType = OpsQuantType::QUANT_ALL; + } +} +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops_builder.h b/frameworks/native/ops_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..ca2f70c4cd12e946dd78524358e79159dfb876da --- /dev/null +++ b/frameworks/native/ops_builder.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_OPS_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_OPS_BUILDER_H + +#include +#include + +#include "nn_tensor.h" +#include "common/log.h" +#include "interfaces/kits/c/neural_network_runtime.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +using LiteGraphPrimitvePtr = std::unique_ptr; +void DestroyLiteGraphPrimitive(void* primitive); + +// QuantType Enum +enum class OpsQuantType: int { + QUANT_NONE = 0, + QUANT_ALL = 1 +}; + +class OpsBuilder { +public: + OpsBuilder() = default; + virtual ~OpsBuilder() = default; + + // Other operation builders inherit from OpsBuilder, delete these special construction and assignment functions. + OpsBuilder(const OpsBuilder& opsBuilder) = delete; + OpsBuilder& operator=(const OpsBuilder& opsBuilder) = delete; + OpsBuilder(OpsBuilder&& opsBuilder) = delete; + OpsBuilder& operator=(OpsBuilder&& opsBuilder) = delete; + + virtual OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) = 0; + virtual LiteGraphPrimitvePtr GetPrimitive() = 0; + + virtual void GetInputIndex(std::vector& inputsIndex, + const std::unordered_map& modelIDToGraphID) const; + virtual void GetOutputIndex(std::vector& outputsIndex, + const std::unordered_map& modelIDToGraphID) const; + virtual std::string GetName() const; + virtual OpsQuantType GetQuantType() const; + +protected: + OH_NN_ReturnCode CheckIOIndex(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors, + const size_t inputNum, + const size_t outputNum) const; + void SetQuantType(const std::vector& outputsIndex, + const std::vector>& allTensors); + +protected: + std::string m_name; + std::vector m_inputsIndex; + std::vector m_outputsIndex; + OpsQuantType m_quantType {OpsQuantType::QUANT_NONE}; + bool m_isBuild {false}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_OPS_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops_registry.cpp b/frameworks/native/ops_registry.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c71eb354f1fe0526e3a66776c76f8490caa3a7af --- /dev/null +++ b/frameworks/native/ops_registry.cpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +OpsRegistry::Registrar::Registrar(OH_NN_OperationType opsType, std::function()> createFunc) +{ + OpsRegistry& registry = OpsRegistry::GetSingleton(); + if (registry.m_opsRegedit.find(opsType) != registry.m_opsRegedit.end()) { + LOGW("Operantion has been registered, cannot register twice. 
Operation type: %d", opsType); + } else { + registry.m_opsRegedit[opsType] = createFunc; + } +} + +OpsRegistry& OpsRegistry::GetSingleton() +{ + static OpsRegistry opsRegistry; + return opsRegistry; +} + +std::unique_ptr OpsRegistry::GetOpsBuilder(OH_NN_OperationType type) const +{ + if (m_opsRegedit.find(type) != m_opsRegedit.end()) { + return m_opsRegedit.at(type)(); + } + return nullptr; +} +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops_registry.h b/frameworks/native/ops_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..29171cbf11da14ebb42bb597f94ca26dd0e4a869 --- /dev/null +++ b/frameworks/native/ops_registry.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef HETERNEURAL_NETWORK_OPS_REGISTRY_H +#define HETERNEURAL_NETWORK_OPS_REGISTRY_H + +#include +#include +#include + +#include "ops_builder.h" +#include "interfaces/kits/c/neural_network_runtime.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class OpsRegistry { +public: + struct Registrar { + Registrar() = delete; + Registrar(OH_NN_OperationType opsType, std::function()> createFunc); + }; + +public: + static OpsRegistry& GetSingleton(); + std::unique_ptr GetOpsBuilder(OH_NN_OperationType type) const; + +private: + OpsRegistry() {}; + OpsRegistry(const OpsRegistry&) = delete; + OpsRegistry& operator=(const OpsRegistry&) = delete; + +private: + std::unordered_map()>> m_opsRegedit; +}; + +#define CREATE_FUNC(T) ([]()->std::unique_ptr {return std::make_unique();}) +#define REGISTER_OPS(T, opsType) static OpsRegistry::Registrar g_##T(opsType, CREATE_FUNC(T)) +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespcae OHOS +#endif // HETERNEURAL_NETWORK_OPS_REGISTRY_H \ No newline at end of file diff --git a/frameworks/native/transform.cpp b/frameworks/native/transform.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ea0d3391a84757016eb600c0e782642a768cd627 --- /dev/null +++ b/frameworks/native/transform.cpp @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "transform.h" + +#include "memory_manager.h" +#include "common/log.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +const uint32_t BIT8_TO_BYTE = 1; +const uint32_t BIT16_TO_BYTE = 2; +const uint32_t BIT32_TO_BYTE = 4; +const uint32_t BIT64_TO_BYTE = 8; + +OH_NN_DeviceType HDIToNN::TransHDIDeviceType(const V1_0::DeviceType& iDeviceType) +{ + switch (iDeviceType) { + case V1_0::DeviceType::CPU: + return OH_NN_CPU; + case V1_0::DeviceType::GPU: + return OH_NN_GPU; + case V1_0::DeviceType::ACCELERATOR: + return OH_NN_ACCELERATOR; + default: + return OH_NN_OTHERS; + } +} + +DeviceStatus HDIToNN::TransHDIDeviceStatus(const V1_0::DeviceStatus& iDeviceStatus) +{ + switch (iDeviceStatus) { + case V1_0::DeviceStatus::AVAILABLE: + return DeviceStatus::AVAILABLE; + case V1_0::DeviceStatus::BUSY: + return DeviceStatus::BUSY; + case V1_0::DeviceStatus::OFFLINE: + return DeviceStatus::OFFLINE; + default: + return DeviceStatus::UNKNOWN; + } +} + +V1_0::PerformanceMode NNToHDI::TransPerformanceMode(const OH_NN_PerformanceMode& mode) +{ + switch (mode) { + case OH_NN_PERFORMANCE_LOW: + return V1_0::PerformanceMode::PERFORMANCE_LOW; + case OH_NN_PERFORMANCE_MEDIUM: + return V1_0::PerformanceMode::PERFORMANCE_MEDIUM; + case OH_NN_PERFORMANCE_HIGH: + return V1_0::PerformanceMode::PERFORMANCE_HIGH; + case OH_NN_PERFORMANCE_EXTREME: + return V1_0::PerformanceMode::PERFORMANCE_EXTREME; + default: + return V1_0::PerformanceMode::PERFORMANCE_NONE; + } +} +V1_0::Priority NNToHDI::TransPriority(const OH_NN_Priority& priority) +{ + switch (priority) { + case OH_NN_PRIORITY_LOW: + return V1_0::Priority::PRIORITY_LOW; + case OH_NN_PRIORITY_MEDIUM: + return V1_0::Priority::PRIORITY_MEDIUM; + case OH_NN_PRIORITY_HIGH: + return V1_0::Priority::PRIORITY_HIGH; + default: + return V1_0::Priority::PRIORITY_NONE; + } +} + +V1_0::DataType NNToHDI::TransDataType(const OH_NN_DataType& dataType) +{ + switch (dataType) { + case OH_NN_BOOL: + return V1_0::DataType::DATA_TYPE_BOOL; + case OH_NN_INT8: + return V1_0::DataType::DATA_TYPE_INT8; + case OH_NN_INT16: + return V1_0::DataType::DATA_TYPE_INT16; + case OH_NN_INT32: + return V1_0::DataType::DATA_TYPE_INT32; + case OH_NN_INT64: + return V1_0::DataType::DATA_TYPE_INT64; + case OH_NN_UINT8: + return V1_0::DataType::DATA_TYPE_UINT8; + case OH_NN_UINT16: + return V1_0::DataType::DATA_TYPE_UINT16; + case OH_NN_UINT32: + return V1_0::DataType::DATA_TYPE_UINT32; + case OH_NN_UINT64: + return V1_0::DataType::DATA_TYPE_UINT64; + case OH_NN_FLOAT16: + return V1_0::DataType::DATA_TYPE_FLOAT16; + case OH_NN_FLOAT32: + return V1_0::DataType::DATA_TYPE_FLOAT32; + case OH_NN_FLOAT64: + return V1_0::DataType::DATA_TYPE_FLOAT64; + default: + return V1_0::DataType::DATA_TYPE_UNKNOWN; + } +} + +V1_0::Format NNToHDI::TransFormat(const OH_NN_Format& format) +{ + switch (format) { + case OH_NN_FORMAT_NCHW: + return V1_0::Format::FORMAT_NCHW; + case OH_NN_FORMAT_NHWC: + return V1_0::Format::FORMAT_NHWC; + default: + return V1_0::Format::FORMAT_NONE; + } +} + +V1_0::IOTensor NNToHDI::TransIOTensor(const IOTensor& tensor) +{ + V1_0::IOTensor iTensor; + iTensor.name = tensor.name; + iTensor.dataType = TransDataType(tensor.dataType); + iTensor.dimensions = tensor.dimensions; + iTensor.format = TransFormat(tensor.format); + + V1_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0}; + if (tensor.data != nullptr) { + auto memManager = MemoryManager::GetInstance(); + Memory memory; + auto ret = memManager->GetMemory(tensor.data, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("Invalid Tensor 
buffer, cannot transform to fd."); + } else { + iBuffer.fd = memory.fd; + iBuffer.bufferSize = memory.length; + iBuffer.offset = 0; + iBuffer.dataSize = memory.length; + } + } + iTensor.data = iBuffer; + + return iTensor; +} + +uint32_t GetTypeSize(OH_NN_DataType type) +{ + switch (type) { + case OH_NN_BOOL: + return sizeof(bool); + case OH_NN_INT8: + case OH_NN_UINT8: + return BIT8_TO_BYTE; + case OH_NN_INT16: + case OH_NN_UINT16: + case OH_NN_FLOAT16: + return BIT16_TO_BYTE; + case OH_NN_INT32: + case OH_NN_UINT32: + case OH_NN_FLOAT32: + return BIT32_TO_BYTE; + case OH_NN_INT64: + case OH_NN_UINT64: + case OH_NN_FLOAT64: + return BIT64_TO_BYTE; + default: + return 0; + } +} + +mindspore::lite::DataType NNToMS::TransformDataType(OH_NN_DataType type) +{ + switch (type) { + case OH_NN_BOOL: + return mindspore::lite::DATA_TYPE_BOOL; + case OH_NN_INT8: + return mindspore::lite::DATA_TYPE_INT8; + case OH_NN_INT16: + return mindspore::lite::DATA_TYPE_INT16; + case OH_NN_INT32: + return mindspore::lite::DATA_TYPE_INT32; + case OH_NN_INT64: + return mindspore::lite::DATA_TYPE_INT64; + case OH_NN_UINT8: + return mindspore::lite::DATA_TYPE_UINT8; + case OH_NN_UINT16: + return mindspore::lite::DATA_TYPE_UINT16; + case OH_NN_UINT32: + return mindspore::lite::DATA_TYPE_UINT32; + case OH_NN_UINT64: + return mindspore::lite::DATA_TYPE_UINT64; + case OH_NN_FLOAT16: + return mindspore::lite::DATA_TYPE_FLOAT16; + case OH_NN_FLOAT32: + return mindspore::lite::DATA_TYPE_FLOAT32; + case OH_NN_FLOAT64: + return mindspore::lite::DATA_TYPE_FLOAT64; + default: + return mindspore::lite::DATA_TYPE_UNKNOWN; + } +} + +mindspore::lite::Format NNToMS::TransformFormat(OH_NN_Format type) +{ + switch (type) { + case OH_NN_FORMAT_NCHW: + return mindspore::lite::FORMAT_NCHW; + case OH_NN_FORMAT_NHWC: + return mindspore::lite::FORMAT_NHWC; + default: + return mindspore::lite::FORMAT_NHWC; + } +} + +mindspore::lite::ActivationType NNToMS::TransfromFusionType(OH_NN_FuseType type) +{ + switch (type) { + case OH_NN_FUSED_NONE: + return mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION; + case OH_NN_FUSED_RELU: + return mindspore::lite::ACTIVATION_TYPE_RELU; + case OH_NN_FUSED_RELU6: + return mindspore::lite::ACTIVATION_TYPE_RELU6; + default: + return mindspore::lite::ACTIVATION_TYPE_UNKNOWN; + } +} + +mindspore::lite::QuantType NNToMS::TransformQuantType(OHOS::NeuralNetworkRuntime::Ops::OpsQuantType type) +{ + switch (type) { + case OHOS::NeuralNetworkRuntime::Ops::OpsQuantType::QUANT_NONE: + return mindspore::lite::QUANT_TYPE_NONE; + case OHOS::NeuralNetworkRuntime::Ops::OpsQuantType::QUANT_ALL: + return mindspore::lite::QUANT_TYPE_ALL; + default: return mindspore::lite::QUANT_TYPE_NONE; + } +} + +mindspore::lite::PadMode NNToMS::TransformPadModeValue(int8_t padMode) +{ + // The value is an optional value of the int8_t type. The value 0 indicates the same, + // and the value 1 indicates valid. + return (padMode == 0) ? 
mindspore::lite::PadMode::PAD_MODE_SAME : + mindspore::lite::PadMode::PAD_MODE_VALID; +} + +OH_NN_DataType MSToNN::TransformDataType(mindspore::lite::DataType type) +{ + switch (type) { + case mindspore::lite::DATA_TYPE_BOOL: + return OH_NN_BOOL; + case mindspore::lite::DATA_TYPE_INT8: + return OH_NN_INT8; + case mindspore::lite::DATA_TYPE_INT16: + return OH_NN_INT16; + case mindspore::lite::DATA_TYPE_INT32: + return OH_NN_INT32; + case mindspore::lite::DATA_TYPE_INT64: + return OH_NN_INT64; + case mindspore::lite::DATA_TYPE_UINT8: + return OH_NN_UINT8; + case mindspore::lite::DATA_TYPE_UINT16: + return OH_NN_UINT16; + case mindspore::lite::DATA_TYPE_UINT32: + return OH_NN_UINT32; + case mindspore::lite::DATA_TYPE_UINT64: + return OH_NN_UINT64; + case mindspore::lite::DATA_TYPE_FLOAT16: + return OH_NN_FLOAT16; + case mindspore::lite::DATA_TYPE_FLOAT32: + return OH_NN_FLOAT32; + case mindspore::lite::DATA_TYPE_FLOAT64: + return OH_NN_FLOAT64; + default: + return OH_NN_UNKNOWN; + } +} + +std::vector MSToNN::TransformQuantParams(std::vector msQuantParams) +{ + std::vector nnQuantParam; + for (const mindspore::lite::QuantParam& param : msQuantParams) { + nnQuantParam.emplace_back((QuantParam){param.numBits, param.scale, param.zeroPoint}); + } + return nnQuantParam; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/transform.h b/frameworks/native/transform.h new file mode 100644 index 0000000000000000000000000000000000000000..70216118dbed98666a2ead9f9f98507b5a2965ef --- /dev/null +++ b/frameworks/native/transform.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
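As a quick illustration of the GetTypeSize helper defined above, the sketch below computes the byte length of a dense tensor from its dimensions and data type. The function name DenseTensorByteLength is illustrative only and not part of this patch.

#include <cstddef>
#include <cstdint>
#include <vector>
#include "transform.h"

// Illustrative only: byte length of a dense tensor whose dimensions are all fixed (no -1 entries).
size_t DenseTensorByteLength(const std::vector<int32_t>& dims, OH_NN_DataType type)
{
    size_t count = 1;
    for (int32_t dim : dims) {
        count *= static_cast<size_t>(dim);
    }
    // e.g. dims = {1, 224, 224, 3} with OH_NN_FLOAT32 gives 150528 * 4 = 602112 bytes.
    return count * OHOS::NeuralNetworkRuntime::GetTypeSize(type);
}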
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_TRANSFORM_H +#define NEURAL_NETWORK_RUNTIME_TRANSFORM_H + +#include "hdi_interfaces.h" +#include "interfaces/kits/c/neural_network_runtime_type.h" +#include "interfaces/oem/cpp_api/cpp_type.h" +#include "mindir.h" +#include "mindir_types.h" +#include "ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +template +std::vector ConstructVectorFromArray(const T* data, size_t size) +{ + std::vector array; + if (data != nullptr) { + array.assign(data, data + size); + } + return array; +} + +uint32_t GetTypeSize(OH_NN_DataType type); + + +namespace HDIToNN { +OH_NN_DeviceType TransHDIDeviceType(const V1_0::DeviceType& iDeviceType); +DeviceStatus TransHDIDeviceStatus(const V1_0::DeviceStatus& iDeviceStatus); +} // namespace HDIToNN + +namespace NNToHDI { +V1_0::PerformanceMode TransPerformanceMode(const OH_NN_PerformanceMode& mode); +V1_0::Priority TransPriority(const OH_NN_Priority& priority); +V1_0::DataType TransDataType(const OH_NN_DataType& dataType); +V1_0::Format TransFormat(const OH_NN_Format& format); +V1_0::IOTensor TransIOTensor(const IOTensor& tensor); +} // namespace NNToHDI + +namespace NNToMS { +mindspore::lite::DataType TransformDataType(OH_NN_DataType type); +mindspore::lite::Format TransformFormat(OH_NN_Format type); +mindspore::lite::ActivationType TransfromFusionType(OH_NN_FuseType type); +mindspore::lite::QuantType TransformQuantType(OHOS::NeuralNetworkRuntime::Ops::OpsQuantType type); +mindspore::lite::PadMode TransformPadModeValue(int8_t padMode); +} // NNToMS + +namespace MSToNN { +OH_NN_DataType TransformDataType(mindspore::lite::DataType type); +std::vector TransformQuantParams(std::vector msQuantParams); +} // namespace MSToNN +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_TRANSFORM_H \ No newline at end of file diff --git a/frameworks/native/validation.cpp b/frameworks/native/validation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..719c8be3600821f1462179d5bd76efa34caae588 --- /dev/null +++ b/frameworks/native/validation.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
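The ConstructVectorFromArray template above is the bridge from the C API's raw (pointer, size) pairs to the std::vector types used internally. A small illustrative sketch, with a hypothetical rawIndices array:

#include <cstdint>
#include <vector>
#include "transform.h"

// Illustrative only: converting a raw C-style array into a std::vector.
const uint32_t rawIndices[] = {0, 1, 2};
std::vector<uint32_t> indices = OHOS::NeuralNetworkRuntime::ConstructVectorFromArray(
    rawIndices, sizeof(rawIndices) / sizeof(rawIndices[0]));
// Passing data == nullptr simply yields an empty vector, matching the guard inside the template.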
+ */ + +#include "mindir_types.h" + +#include "validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Validation { +bool ValidateTensorDataType(OH_NN_DataType dataType) +{ + if (dataType >= OH_NN_UNKNOWN && dataType <= OH_NN_FLOAT64) { + return true; + } + return false; +} + +bool ValidatePerformanceMode(OH_NN_PerformanceMode performanceMode) +{ + if ((performanceMode >= OH_NN_PERFORMANCE_NONE) && (performanceMode <= OH_NN_PERFORMANCE_EXTREME)) { + return true; + } + return false; +} + +bool ValidatePriority(OH_NN_Priority priority) +{ + if ((priority >= OH_NN_PRIORITY_NONE) && (priority <= OH_NN_PRIORITY_HIGH)) { + return true; + } + return false; +} + +bool ValidateFuseType(OH_NN_FuseType fuseType) +{ + if ((fuseType >= OH_NN_FUSED_NONE) && (fuseType <= OH_NN_FUSED_RELU6)) { + return true; + } + return false; +} + +bool ValidatePadMode(int8_t padMode) +{ + if ((padMode >= mindspore::lite::PAD_MODE_PAD) && (padMode <= mindspore::lite::PAD_MODE_VALID)) { + return true; + } + return false; +} + +bool ValidateTensorType(OH_NN_TensorType nnTensorType) +{ + if ((nnTensorType >= OH_NN_TENSOR) && (nnTensorType <= OH_NN_UNSQUEEZE_AXIS)) { + return true; + } + return false; +} +} // namespace Validation +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/validation.h b/frameworks/native/validation.h new file mode 100644 index 0000000000000000000000000000000000000000..919d4c440e1a060c436e5a292bb87e8e1c2e9a3c --- /dev/null +++ b/frameworks/native/validation.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_VALIDATION_H +#define NEURAL_NETWORK_RUNTIME_VALIDATION_H + +#include "common/log.h" +#include "interfaces/kits/c/neural_network_runtime.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Validation { +template +OH_NN_ReturnCode ValidateArray(const T* data, size_t size) +{ + if ((data != nullptr) != (size > 0)) { + LOGE("ValidateArray failed, data is %p but the length is %zu", data, size); + return OH_NN_INVALID_PARAMETER; + } + return OH_NN_SUCCESS; +} + +bool ValidateTensorType(OH_NN_TensorType nnTensorType); +bool ValidateTensorDataType(OH_NN_DataType dataType); +bool ValidatePerformanceMode(OH_NN_PerformanceMode performanceMode); +bool ValidatePriority(OH_NN_Priority priority); +bool ValidateFuseType(OH_NN_FuseType fuseType); +bool ValidatePadMode(int8_t padMode); +} // namespace Validation +} // NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_VALIDATION_H diff --git a/interfaces/innerkits/c/neural_network_runtime_inner.h b/interfaces/innerkits/c/neural_network_runtime_inner.h new file mode 100644 index 0000000000000000000000000000000000000000..4b298e4f00498836eba507488ac578e5139ae63c --- /dev/null +++ b/interfaces/innerkits/c/neural_network_runtime_inner.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
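The validation helpers above act as small guards at the C API boundary. Below is a hedged sketch of the typical pattern; CheckTensorBasics is hypothetical, and the OH_NN_Tensor field names (dataType, dimensionCount, dimensions) are assumed from the public C API rather than taken from this patch.

#include "validation.h"
#include "interfaces/kits/c/neural_network_runtime.h"

// Hypothetical guard, not part of this patch.
OH_NN_ReturnCode CheckTensorBasics(const OH_NN_Tensor& tensor)
{
    if (!OHOS::NeuralNetworkRuntime::Validation::ValidateTensorDataType(tensor.dataType)) {
        return OH_NN_INVALID_PARAMETER;
    }
    // dimensions and dimensionCount must be either both set or both empty.
    return OHOS::NeuralNetworkRuntime::Validation::ValidateArray(tensor.dimensions, tensor.dimensionCount);
}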
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_INNER_H +#define NEURAL_NETWORK_RUNTIME_INNER_H + +#include "interfaces/kits/c/neural_network_runtime_type.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief 直接加载LiteGraph,完成模型搭建。 + * + * 调用{@link OH_NNModel_Construct}创建模型实例后,直接调用本方法加载LiteGraph。加载LiteGraph后,只能调用 + * {@link OH_NNCompilation_Construct}创建模型编译器,或者调用{@link OH_NNModel_Destroy}销毁模型实例。\n + * + * 不允许本方法与{@link OH_NNModel_AddTensor}、{@link OH_NNModel_AddOperation}、 + * {@link OH_NNModel_SetTensorData}和{@link OH_NNModel_SpecifyInputsAndOutputs} + * 等构图接口混用,否则返回{@link OH_NN_OPERATION_FORBIDDEN}错误。\n + * + * 如果本方法调用成功,返回{@link OH_NN_SUCCESS},liteGraph将由NNRt管理,调用者无需释放,避免造成二次释放; + * 如果方法返回其他错误码,则NNRt不会持有liteGraph,此时需要调用者主动释放内存。 + * + * + * 本接口不作为Neural Network Runtime接口对外开放。\n + * + * @param model 指向{@link OH_NNModel}实例的指针。 + * @param liteGraph 指向LiteGraph的指针。 + * @return 函数执行的结果状态,执行成功返回OH_NN_SUCCESS,失败返回具体错误码,参考{@link OH_NN_ReturnCode}。 + * @throw std::bad_alloc 本方法可能在转换原始指针到智能指针的过程中,抛出std::bad_alloc异常,此时liteGraph将被 + * 主动释放。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNModel_BuildFromLiteGraph(OH_NNModel *model, const void *liteGraph); + +#ifdef __cplusplus +} +#endif // __cpluscplus +#endif // NEURAL_NETWORK_RUNTIME_INNER_H \ No newline at end of file diff --git a/interfaces/kits/c/neural_network_runtime.h b/interfaces/kits/c/neural_network_runtime.h new file mode 100644 index 0000000000000000000000000000000000000000..15d8dc7375c99e6cb8ed25cf71d82b3d47d24b0d --- /dev/null +++ b/interfaces/kits/c/neural_network_runtime.h @@ -0,0 +1,686 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
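Because the ownership rules of OH_NNModel_BuildFromLiteGraph documented above are easy to get wrong, here is a hedged sketch of the intended call pattern. How the liteGraph pointer is produced is out of scope, and ReleaseLiteGraph stands in for whatever cleanup the caller would otherwise perform; both names are assumptions, not part of this patch.

#include "interfaces/innerkits/c/neural_network_runtime_inner.h"
#include "interfaces/kits/c/neural_network_runtime.h"

void ReleaseLiteGraph(void* liteGraph); // Hypothetical caller-side cleanup.

// Hypothetical sketch of the ownership contract described above.
void BuildModelFromLiteGraph(void* liteGraph)
{
    OH_NNModel* model = OH_NNModel_Construct();
    if (model == nullptr) {
        ReleaseLiteGraph(liteGraph);      // NNRt never took the graph; the caller still owns it.
        return;
    }
    OH_NN_ReturnCode ret = OH_NNModel_BuildFromLiteGraph(model, liteGraph);
    if (ret != OH_NN_SUCCESS) {
        ReleaseLiteGraph(liteGraph);      // On any failure NNRt does not hold liteGraph.
        OH_NNModel_Destroy(&model);
        return;
    }
    // On OH_NN_SUCCESS the graph is managed by NNRt; releasing it again would be a double free.
    OH_NNModel_Destroy(&model);
}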
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_H +#define NEURAL_NETWORK_RUNTIME_H +/** + * @file neural_network_runtime.h + * + * @brief Neural Network Runtime部件接口定义,通过调用以下接口,在硬件加速器上执行深度学习模型推理计算。 + * + * 注意:Neural Network Runtime的接口目前均不支持多线程调用。\n + * + * @since 9 + * @version 1.0 + */ +#include "neural_network_runtime_type.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup NNModel + * @{ + * + * @brief Neural Network Runtime 构图模块,提供了一系列构图接口实现操作数的添加、算子的添加和输入输出的设置,帮助开发者完成 + * AI模型的构建。 + * + * @since 9 + * @version 1.0 + */ + +/** + * @brief 创建{@link OH_NNModel}类型的模型实例,搭配OH_NNModel模块提供的其他接口,完成模型实例的构造。 + * + * 在开始构图前,先调用{@link OH_NNModel_Construct}创建模型实例,根据模型的拓扑结构,调用 + * {@link OH_NNModel_AddTensor}、{@link OH_NNModel_AddOperation}和 + * {@link OH_NNModel_SetTensorData}方法,填充模型的数据节点和算子节点;然后调用 + * {@link OH_NNModel_SpecifyInputsAndOutputs}指定模型的输入和输出;当构造完模型的拓扑结构,调用 + * {@link OH_NNModel_Finish}完成模型的构建。\n + * + * 模型实例使用完毕后,需要调用{@link OH_NNModel_Destroy}销毁模型实例,避免内存泄漏。\n + * + * @return 返回一个指向{@link OH_NNModel}实例的指针。 + * @since 9 + * @version 1.0 + */ +OH_NNModel *OH_NNModel_Construct(void); + +/** + * @brief 向模型实例中添加操作数 + * + * Neural Network Runtime模型中的数据节点和算子参数均由模型的操作数构成。本方法根据tensor,向model实 + * 例中添加操作数。操作数添加的顺序是模型中记录操作数的索引值,{@link OH_NNModel_SetTensorData}、 + * {@link OH_NNModel_AddOperation}和{@link OH_NNModel_SpecifyInputsAndOutputs} + * 方法根据该索引值,指定不同的操作数。\n + * + * Neural Network Runtime支持动态形状输入和输出。在添加动态形状的数据节点时,需要将tensor.dimensions中支持动态 + * 变化的维度设置为-1。例如:一个4维tensor,将tensor.dimensions设置为[1, -1, 2, 2],表示其第二个维度支持 + * 动态变化。\n + * + * @param model 指向{@link OH_NNModel}实例的指针。 + * @param tensor {@link OH_NN_Tensor}操作数的指针,tensor指定了添加到模型实例中操作数的属性。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNModel_AddTensor(OH_NNModel *model, const OH_NN_Tensor *tensor); + +/** + * @brief 设置操作数的数值 + * + * 对于具有常量值的操作数(如模型的权重),需要在构图阶段使用本方法设置数值。操作数的索引值根据操作数添加进模型的顺序决定,操作数的添加参考 + * {@link OH_NNModel_AddTensor}。\n + * + * @param model 指向{@link OH_NNModel}实例的指针。 + * @param index 操作数的索引值。 + * @param dataBuffer 指向真实数据的指针。 + * @param length 数据缓冲区的长度。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNModel_SetTensorData(OH_NNModel *model, uint32_t index, const void *dataBuffer, size_t length); + +/** + * @brief 向模型实例中添加算子 + * + * 本方法向模型实例中添加算子,算子类型由op指定,算子的参数、输入和输出由paramIndices、inputIndices和 + * outputIndices指定。本方法将对算子参数的属性和输入输出的数量进行校验,这些属性需要在调用 + * {@link OH_NNModel_AddTensor}添加操作数的时候正确设置。每个算子期望的参数、输入和输出属性请参考 + * {@link OH_NN_OperationType}。\n + * + * paramIndices、inputIndices和outputIndices中存储的是操作数的索引值,每个索引值根据操作数添加进模型的顺序决定,正确 + * 设置并添加算子要求准确设置每个操作数的索引值。操作数的添加参考{@link OH_NNModel_AddTensor}。\n + * + * 如果添加算子时,添加了额外的参数(非算子需要的参数),本方法返回{@link OH_NN_INVALID_PARAMETER};如果没有设置算子参数, + * 则算子按默认值设置缺省的参数,默认值请参考{@link OH_NN_OperationType}。\n + * + * @param model 指向{@link OH_NNModel}实例的指针。 + * @param op 指定添加的算子类型,取值请参考{@link OH_NN_OperationType}的枚举值。 + * @param paramIndices OH_NN_UInt32Array实例的指针,设置算子的参数。 + * @param inputIndices OH_NN_UInt32Array实例的指针,指定算子的输入。 + * @param outputIndices OH_NN_UInt32Array实例的指针,设置算子的输出。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNModel_AddOperation(OH_NNModel *model, + OH_NN_OperationType op, + const OH_NN_UInt32Array *paramIndices, + const OH_NN_UInt32Array *inputIndices, + const OH_NN_UInt32Array 
*outputIndices); + +/** + * @brief 指定模型的输入输出 + * + * 模型实例需要指定操作数作为端到端的输入和输出,设置为输入和输出的操作数不能使用{@link OH_NNModel_SetTensorData}设置 + * 数值,需要在执行阶段调用OH_NNExecutor的方法设置输入、输出数据。\n + * + * 操作数的索引值根据操作数添加进模型的顺序决定,操作数的添加参考 + * {@link OH_NNModel_AddTensor}。\n + * + * 暂时不支持异步设置模型输入输出。\n + * + * @param model 指向{@link OH_NNModel}实例的指针。 + * @param inputIndices OH_NN_UInt32Array实例的指针,指定算子的输入。 + * @param outputIndices OH_NN_UInt32Array实例的指针,指定算子的输出。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNModel_SpecifyInputsAndOutputs(OH_NNModel *model, + const OH_NN_UInt32Array *inputIndices, + const OH_NN_UInt32Array *outputIndices); + +/** + * @brief 完成模型构图 + * + * 完成模型拓扑结构的搭建后,调用本方法指示构图已完成。在调用本方法后,无法进行额外的构图操作,调用 + * {@link OH_NNModel_AddTensor}、{@link OH_NNModel_AddOperation}、 + * {@link OH_NNModel_SetTensorData}和 + * {@link OH_NNModel_SpecifyInputsAndOutputs}将返回 + * {@link OH_NN_OPERATION_FORBIDDEN}。\n + * + * 在调用{@link OH_NNModel_GetAvailableOperations}和{@link OH_NNCompilation_Construct} + * 之前,必须先调用本方法完成构图。\n + * + * @param model 指向{@link OH_NNModel}实例的指针。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNModel_Finish(OH_NNModel *model); + +/** + * @brief 释放模型实例。 + * + * 调用{@link OH_NNModel_Construct}创建的模型实例需要调用本方法主动释放,否则将造成内存泄漏。\n + * + * 如果model为空指针或者*model为空指针,本方法只打印warning日志,不执行释放逻辑。\n + * + * @param model 指向{@link OH_NNModel}实例的二级指针。模型实例销毁后,本方法将*model主动设置为空指针。 + * @since 9 + * @version 1.0 + */ +void OH_NNModel_Destroy(OH_NNModel **model); + +/** + * @brief 查询硬件对模型内所有算子的支持情况,通过布尔值序列指示支持情况。 + * + * 查询底层硬件对模型实例内每个算子的支持情况,硬件由deviceID指定,结果将通过isSupported指向的数组表示。如果支持第i个算子,则 + * (*isSupported)[i] == true,否则为 false。\n + * + * 本方法成功执行后,(*isSupported)将指向记录算子支持情况的bool数组,数组长度和模型实例的算子数量相等。该数组对应的内存由 + * Neural Network Runtime管理,在模型实例销毁或再次调用本方法后自动销毁。\n + * + * @param model 指向{@link OH_NNModel}实例的指针。 + * @param deviceID 指定查询的硬件ID,通过{@link OH_NNDevice_GetAllDevicesID}获取。 + * @param isSupported 指向bool数组的指针。调用本方法时,要求(*isSupported)为空指针,否则返回 + * {@link OH_NN_INVALID_PARAMETER}。 + * @param opCount 模型实例中算子的数量,对应(*isSupported)数组的长度。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNModel_GetAvailableOperations(OH_NNModel *model, + size_t deviceID, + const bool **isSupported, + uint32_t *opCount); +/** @} */ + +/** + * @addtogroup NNCompilation + * @{ + * + * @brief Neural Network Runtime 编译模块 + * + * @since 9 + * @version 1.0 + */ + +/** + * @brief 创建{@link OH_NNCompilation}类型的编译实例 + * + * 使用OH_NNModel模块完成模型的构造后,借助OH_NNCompilation模块提供的接口,将模型传递到底层硬件完成编译。本方法接受一个 + * {@link OH_NNModel}实例,创建出{@link OH_NNCompilation}实例;通过 + * {@link OH_NNCompilation_SetDevice}方法,设置编译的设备,最后调用 + * {@link OH_NNCompilation_Build}完成编译。\n + * + * 除了计算硬件的选择,OH_NNCompilation模块支持模型缓存、性能偏好、优先级设置、float16计算等特性,参考以下方法: + * - {@link OH_NNCompilation_SetCache} + * - {@link OH_NNCompilation_SetPerformanceMode} + * - {@link OH_NNCompilation_SetPriority} + * - {@link OH_NNCompilation_EnableFloat16}\n + * + * 调用本方法创建{@link OH_NNCompilation}后,{@link OH_NNModel}实例可以释放。\n + * + * @param model 指向{@link OH_NNModel}实例的指针。 + * @return 返回一个指向{@link OH_NNCompilation}实例的指针。 + * @since 9 + * @version 1.0 + */ +OH_NNCompilation *OH_NNCompilation_Construct(const OH_NNModel *model); + +/** + * @brief 指定模型编译和计算的硬件。 + * + * 编译阶段,需要指定模型编译和执行计算的硬件设备。先调用{@link OH_NNDevice_GetAllDevicesID}获取可用的设备ID, + * 
通过{@link OH_NNDevice_GetType}和{@link OH_NNDevice_GetType}获取设备信息后,将期望编译执行的 + * 设备ID传入本方法进行设置。\n + * + * @param compilation 指向{@link OH_NNCompilation}实例的指针。 + * @param deviceID 指定的硬件ID。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNCompilation_SetDevice(OH_NNCompilation *compilation, size_t deviceID); + +/** + * @brief 设置编译后的模型缓存路径和缓存版本。 + * + * 在支持缓存的硬件上,模型在硬件驱动层编译后可以保存为缓存文件,下次编译时直接从缓存文件读取模型,减少重新编译的耗时。本方法接受缓存路径和版本,根据缓存 + * 路径中和版本的不同情况,本方法采取不同的行为:\n + * + * - 缓存路径指定的目录下没有文件: + * 将编译后的模型缓存到目录下,设置缓存版本等于version。\n + * + * - 缓存路径指定的目录下存在完整的缓存文件,且版本号 == version: + * 读取路径下的缓存文件,传递到底层硬件中转换为可以执行的模型实例。\n + * + * - 缓存路径指定的目录下存在完整的缓存文件,但版本号 < version: + * 路径下的缓存文件需要更新,模型在底层硬件完成编译后,覆写路径下的缓存文件,将版本号更新为version。\n + * + * - 缓存路径指定的目录下存在完整的缓存文件,但版本号 > version: + * 路径下的缓存文件版本高于version,不读取缓存文件,同时返回{@link OH_NN_INVALID_PARAMETER}错误码。\n + * + * - 缓存路径指定的目录下的缓存文件不完整或没有缓存文件的访问权限: + * 返回{@link OH_NN_INVALID_FILE}错误码。\n + * + * - 缓存目录不存在,或者没有访问权限: + * 返回{@link OH_NN_INVALID_PATH}错误码。\n + * + * @param compilation 指向{@link OH_NNCompilation}实例的指针。 + * @param cachePath 模型缓存文件目录,本方法在cachePath目录下为不同的硬件创建缓存目录。建议每个模型使用单独的缓存目录。 + * @param version 缓存版本。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNCompilation_SetCache(OH_NNCompilation *compilation, const char *cachePath, uint32_t version); + +/** + * @brief 设置模型计算的性能模式。 + * + * Neural Network Runtime 支持为模型计算设置性能模式,满足低功耗到极致性能的需求。如果编译阶段没有调用本方法设置性能模式, + * 编译实例为模型默认分配{@link OH_NN_PERFORMANCE_NONE}模式。在{@link OH_NN_PERFORMANCE_NONE} + * 模式下,硬件按默认的性能模式执行计算。\n + * + * 在不支持性能模式设置的硬件上调用本方法,将返回{@link OH_NN_UNAVALIDABLE_DEVICE}错误码。\n + * + * @param compilation 指向{@link OH_NNCompilation}实例的指针。 + * @param performanceMode 指定性能模式,可选的性能模式参考{@link OH_NN_PerformanceMode}。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNCompilation_SetPerformanceMode(OH_NNCompilation *compilation, + OH_NN_PerformanceMode performanceMode); + +/** + * @brief 设置模型计算的优先级。 + * + * Neural Network Runtime 支持为模型设置计算优先级,优先级仅作用于相同uid进程创建的模型,不同uid进程、不同设备的优先级不会 + * 相互影响。\n + * + * 在不支持优先级设置的硬件上调用本方法,将返回{@link OH_NN_UNAVALIDABLE_DEVICE}错误码。\n + * + * @param compilation 指向{@link OH_NNCompilation}实例的指针。 + * @param priority 指定优先级,可选的优先级参考{@link OH_NN_Priority}。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNCompilation_SetPriority(OH_NNCompilation *compilation, OH_NN_Priority priority); + +/** + * @brief 是否以float16的浮点数精度计算。 + * + * Neural Network Runtime目前仅支持构造float32浮点模型和int8量化模型。在支持float16精度的硬件上调用本方法, + * float32浮点数精度的模型将以float16的精度执行计算,以减少内存占用和执行时间。\n + * + * 在不支持float16精度计算的硬件上调用本方法,将返回{@link OH_NN_UNAVALIDABLE_DEVICE}错误码。\n + * + * @param compilation 指向{@link OH_NNCompilation}实例的指针。 + * @param enableFloat16 Float16低精度计算标志位。设置为true时,执行Float16推理;设置为false时,执行float32推理。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNCompilation_EnableFloat16(OH_NNCompilation *compilation, bool enableFloat16); + +/** + * @brief 进行模型编译 + * + * 完成编译配置后,调用本方法指示模型编译已完成。编译实例将模型和编译选项推送至硬件设备进行编译。在调用本方法后,无法进行额外的编译操作,调用 + * {@link OH_NNCompilation_SetDevice}、{@link OH_NNCompilation_SetCache}、 + * {@link OH_NNCompilation_SetPerformanceMode}、 + * {@link 
OH_NNCompilation_SetPriority}和{@link OH_NNCompilation_EnableFloat16} + * 方法将返回{@link OH_NN_OPERATION_FORBIDDEN}。\n + * + * @param compilation 指向{@link OH_NNCompilation}实例的指针。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNCompilation_Build(OH_NNCompilation *compilation); + +/** + * @brief 释放Compilation对象。 + * + * 调用{@link OH_NNCompilation_Construct}创建的编译实例需要调用本方法主动释放,否则将造成内存泄漏。\n + * + * 如果compilation为空指针或者*compilation为空指针,本方法只打印warning日志,不执行释放逻辑。\n + * + * @param compilation 指向{@link OH_NNCompilation}实例的二级指针。编译实例销毁后,本方法将*compilation主动设置为空指针。 + * @since 9 + * @version 1.0 + */ +void OH_NNCompilation_Destroy(OH_NNCompilation **compilation); +/** @} */ + +/** + * @addtogroup NNExecutor + * @{ + * + * @brief Neural Network Runtime 执行模块 + * + * @since 9 + * @version 1.0 + */ + +/** + * @brief 创建{@link OH_NNExecutor}类型的执行器实例 + * + * 本方法接受一个编译器,构造一个与硬件关联的模型推理执行器。通过{@link OH_NNExecutor_SetInput}设置模型输入数据, + * 设置输入数据后,调用{@link OH_NNExecutor_Run}方法执行推理,最后通过 + * {@link OH_NNExecutor_GetOutput}获取计算结果。\n + * + * 调用本方法创建{@link OH_NNExecutor}实例后,如果不需要创建其他执行器,可以安全释放{@link OH_NNCompilation}实例。\n + * + * @param compilation 指向{@link OH_NNCompilation}实例的指针。 + * @return 返回指向{@link OH_NNExecutor}实例的指针。 + * @since 9 + * @version 1.0 + */ +OH_NNExecutor *OH_NNExecutor_Construct(OH_NNCompilation *compilation); + +/** + * @brief 设置模型单个输入的数据。 + * + * 本方法将dataBuffer中,长度为length个字节的数据,拷贝到底层硬件的共享内存。inputIndex指定设置的输入,tensor用于设置输入的 + * 形状、类型、量化参数等信息。\n + * + * 由于Neural Network Runtime支持动态输入形状的模型,在固定形状输入和动态形状输入的场景下,本方法采取不同的处理策略: + * + * - 固定形状输入的场景:tensor各属性必须和构图阶段调用{@link OH_NNModel_AddTensor}添加的操作数保持一致; + * - 动态形状输入的场景:在构图阶段,由于动态输入的形状不确定,调用本方法时,要求tensor.dimensions中的每个值必须大于0, + * 以确定执行计算阶段输入的形状。设置形状时,只允许调整数值为-1的维度。假设在构图阶段,输入A的维度为 + * [-1, 224, 224, 3],调用本方法时,只能调整第一个维度的尺寸,如:[3, 224, 224, 3]。调整其他维度将返回 + * {@link OH_NN_INVALID_PARAMETER}。\n + * + * inputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输入的顺序, + * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,inputIndices为{1,2,3}, + * 则在执行阶段,三个输入的索引值分别为{0, 1, 2}。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @param inputIndex 输入的索引值。 + * @param tensor 设置输入数据对应的操作数。 + * @param dataBuffer 指向输入数据的指针。 + * @param length 数据缓冲区的字节长度。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNExecutor_SetInput(OH_NNExecutor *executor, + uint32_t inputIndex, + const OH_NN_Tensor *tensor, + const void *dataBuffer, + size_t length); + +/** + * @brief 设置模型单个输出的缓冲区。 + * + * 本方法将dataBuffer指向的缓冲区与outputIndex指定的输出绑定,缓冲区的长度由length指定。\n + * + * 调用{@link OH_NNExecutor_Run}完成单次模型推理后,Neural Network Runtime将比对dataBuffer指向的缓冲区与 + * 输出数据的长度,根据不同情况,返回不同结果:\n + * + * - 如果缓冲区大于或等于数据长度:则推理后的结果将拷贝至缓冲区,并返回{@link OH_NN_SUCCESS},可以通过访问dataBuffer读取推理结果。 + * - 如果缓冲区小于数据长度:则{@link OH_NNExecutor_Run}将返回{@link OH_NN_INVALID_PARAMETER}, + * 并输出日志告知缓冲区太小的信息。\n + * + * outputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输出的顺序, + * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,outputIndices为{4, 5, 6}, + * 则在执行阶段,三个输出的索引值分别为{0, 1, 2}。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @param outputIndex 输出的索引值。 + * @param dataBuffer 指向输出数据的指针。 + * @param length 数据缓冲区的字节长度。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNExecutor_SetOutput(OH_NNExecutor *executor, + 
uint32_t outputIndex, + void *dataBuffer, + size_t length); + +/** + * @brief 获取输出tensor的维度信息。 + * + * 调用{@link OH_NNExecutor_Run}完成单次推理后,本方法获取指定输出的维度信息和维数。在动态形状输入、输出的场景中常用。\n + * + * outputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输出的顺序, + * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,outputIndices为{4, 5, 6}, + * 则在执行阶段,三个输出的索引值分别为{0, 1, 2}。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @param outputIndex 输出的索引值。 + * @param shape 指向int32_t数组的指针,数组中的每个元素值,是输出tensor在每个维度上的长度。 + * @param length uint32_t类型的指针,返回输出的维数。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNExecutor_GetOutputShape(OH_NNExecutor *executor, + uint32_t outputIndex, + int32_t **shape, + uint32_t *shapeLength); + +/** + * @brief 执行推理。 + * + * 在执行器关联的硬件上,执行模型的端到端推理计算。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNExecutor_Run(OH_NNExecutor *executor); + +/** + * @brief 在硬件上为单个输入申请共享内存。 + * + * Neural Network Runtime 提供主动申请硬件共享内存的方法。通过指定执行器和输入索引值,本方法在单个输入关联的硬件 + * 上,申请大小为length的共享内存,通过{@link OH_NN_Memory}实例返回。\n + * + * inputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输入的顺序, + * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,inputIndices为{1,2,3}, + * 则在执行阶段,三个输入的索引值分别为{0, 1, 2}。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @param inputIndex 输入的索引值。 + * @param length 申请的内存字节。 + * @return 指向{@link OH_NN_Memory}实例的指针。 + * @since 9 + * @version 1.0 + */ +OH_NN_Memory *OH_NNExecutor_AllocateInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, size_t length); + +/** + * @brief 在硬件上为单个输出申请共享内存。 + * + * Neural Network Runtime 提供主动申请硬件共享内存的方法。通过指定执行器和输出索引值,本方法在单个输出关联的硬件 + * 上,申请大小为length的共享内存,通过{@link OH_NN_Memory}实例返回。\n + * + * outputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输出的顺序, + * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,outputIndices为{4, 5, 6}, + * 则在执行阶段,三个输出的索引值分别为{0, 1, 2}。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @param outputIndex 输出的索引值。 + * @param length 申请的内存字节。 + * @return 指向{@link OH_NN_Memory}实例的指针。 + * @since 9 + * @version 1.0 + */ +OH_NN_Memory *OH_NNExecutor_AllocateOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, size_t length); + +/** + * @brief 释放{@link OH_NN_Memory}实例指向的输入内存。 + * + * 调用{@link OH_NNExecutor_AllocateInputMemory}创建的内存实例,需要主动调用本方法进行释放,否则将造成内存泄漏。 + * inputIndex和memory的对应关系需要和创建内存实例时保持一致。\n + * + * 如果memory或*memory为空指针,本方法只打印warning日志,不执行释放逻辑。\n + * + * inputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输入的顺序, + * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,inputIndices为{1,2,3}, + * 则在执行阶段,三个输入的索引值分别为{0, 1, 2}。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @param inputIndex 输入的索引值。 + * @param memory 指向{@link OH_NN_Memory}实例的二级指针。共享内存销毁后,本方法将*memory主动设置为空指针。 + * @since 9 + * @version 1.0 + */ +void OH_NNExecutor_DestroyInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, OH_NN_Memory **memory); + +/** + * @brief 释放{@link OH_NN_Memory}实例指向的输出内存。 + * + * 调用{@link OH_NNExecutor_AllocateOutputMemory}创建的内存实例,需要主动调用本方法进行释放,否则将造成内存泄漏。 + * outputIndex和memory的对应关系需要和创建内存实例时保持一致。\n + * + * 如果memory或*memory为空指针,本方法只打印warning日志,不执行释放逻辑。\n + * + * outputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输出的顺序, + * 依次加一。假设调用{@link 
OH_NNModel_SpecifyInputsAndOutputs}时,outputIndices为{4, 5, 6}, + * 则在执行阶段,三个输出的索引值分别为{0, 1, 2}。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @param outputIndex 输出的索引值。 + * @param memory 指向{@link OH_NN_Memory}实例的二级指针。共享内存销毁后,本方法将*memory主动设置为空指针。 + * @since 9 + * @version 1.0 + */ +void OH_NNExecutor_DestroyOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, OH_NN_Memory **memory); + +/** + * @brief 将{@link OH_NN_Memory}实例指向的硬件共享内存,指定为单个输入使用的共享内存。 + * + * 在需要自行管理内存的场景下,本方法将执行输入和{@link OH_NN_Memory}内存实例绑定。执行计算时,底层硬件从内存实例指向的共享内存中读取 + * 输入数据。通过本方法,可以实现设置输入、执行计算、读取输出的并发执行,提升数据流的推理效率。\n + * + * inputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输入的顺序, + * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,inputIndices为{1,2,3}, + * 则在执行阶段,三个输入的索引值分别为{0, 1, 2}。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @param inputIndex 输入的索引值。 + * @param tensor 指向{@link OH_NN_Tensor}的指针,设置单个输入所对应的操作数。 + * @param memory 指向{@link OH_NN_Memory}的指针。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNExecutor_SetInputWithMemory(OH_NNExecutor *executor, + uint32_t inputIndex, + const OH_NN_Tensor *tensor, + const OH_NN_Memory *memory); + +/** + * @brief 将{@link OH_NN_Memory}实例指向的硬件共享内存,指定为单个输出使用的共享内存。 + * + * 在需要自行管理内存的场景下,本方法将执行输出和{@link OH_NN_Memory}内存实例绑定。执行计算时,底层硬件将计算结果直接写入内存实例指向 + * 的共享内存。通过本方法,可以实现设置输入、执行计算、读取输出的并发执行,提升数据流的推理效率。\n + * + * outputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输出的顺序, + * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,outputIndices为{4, 5, 6}, + * 则在执行阶段,三个输出的索引值分别为{0, 1, 2}。\n + * + * @param executor 执行器。 + * @param outputIndex 输出的索引值。 + * @param memory 指向{@link OH_NN_Memory}的指针。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNExecutor_SetOutputWithMemory(OH_NNExecutor *executor, + uint32_t outputIndex, + const OH_NN_Memory *memory); + +/** + * @brief 销毁执行器实例,释放执行器占用的内存。 + * + * 调用{@link OH_NNExecutor_Construct}创建的执行器实例需要调用本方法主动释放,否则将造成内存泄漏。\n + * + * 如果executor为空指针或者*executor为空指针,本方法只打印warning日志,不执行释放逻辑。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的二级指针。 + * @since 9 + * @version 1.0 + */ +void OH_NNExecutor_Destroy(OH_NNExecutor **executor); +/** @} */ + +/** + * @addtogroup NNDevice + * @{ + * + * @brief Neural Network Runtime 设备管理模块 + * + * @since 9 + * @version 1.0 + */ + +/** + * @brief 获取对接到 Neural Network Runtime 的硬件ID。 + * + * 每个硬件在 Neural Network Runtime 中存在唯一且固定ID,本方法通过uin32_t数组返回当前设备上已经对接的硬件ID。\n + * + * 硬件ID通过size_t数组返回,数组的每个元素是单个硬件的ID值。数组内存由Neural Network Runtime管理。在下次调用本方法前, + * 数据指针有效。\n + * + * @param allDevicesID 指向size_t数组的指针。要求传入的(*allDevicesID)为空指针,否则返回 + * {@link OH_NN_INVALID_PARAMETER}。 + * @param deviceCount uint32_t类型的指针,用于返回(*allDevicesID)的长度。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNDevice_GetAllDevicesID(const size_t **allDevicesID, uint32_t *deviceCount); + +/** + * @brief 获取指定硬件的类型信息。 + * + * 通过deviceID指定计算硬件,获取硬件的名称。硬件ID需要调用{@link OH_NNDevice_GetAllDevicesID}获取。\n + * + * @param deviceID 指定硬件ID。 + * @param name 指向char数组的指针,要求传入的(*char)为空指针,否则返回 + * {@link OH_NN_INVALID_PARAMETER}。(*name)以C风格字符串保存硬件名称,数组以\0结尾。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNDevice_GetName(size_t 
deviceID, const char **name); + +/** + * @brief 获取指定硬件的类别信息。 + * + * 通过deviceID指定计算硬件,获取硬件的类别。目前 Neural Network Runtime 支持的设备类型有: + * - CPU设备:OH_NN_CPU + * - GPU设备:OH_NN_GPU + * - 机器学习专用加速器:OH_NN_ACCELERATOR + * - 不属于以上类型的其他硬件类型:OH_NN_OTHERS\n + * + * @param deviceID 指定硬件ID。 + * @param deviceType 指向{@link OH_NN_DeviceType}实例的指针,返回硬件的类别信息。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNDevice_GetType(size_t deviceID, OH_NN_DeviceType *deviceType); +/** @} */ + +#ifdef __cplusplus +} +#endif // __cplusplus +#endif // NEURAL_NETWORK_RUNTIME_H diff --git a/interfaces/kits/c/neural_network_runtime_type.h b/interfaces/kits/c/neural_network_runtime_type.h new file mode 100644 index 0000000000000000000000000000000000000000..73f932002ae5d710464d6a61510be72512cfaed0 --- /dev/null +++ b/interfaces/kits/c/neural_network_runtime_type.h @@ -0,0 +1,1632 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_TYPE_H +#define NEURAL_NETWORK_RUNTIME_TYPE_H +/** + * @file neural_network_runtime_type.h + * + * @brief Neural Network Runtime定义的结构体和枚举值。 + * + * @since 9 + * @version 1.0 + */ +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Neural Network Runtime的模型句柄 + * + * @since 9 + * @version 1.0 + */ +typedef struct OH_NNModel OH_NNModel; + +/** + * @brief Neural Network Runtime的编译器句柄 + * + * @since 9 + * @version 1.0 + */ +typedef struct OH_NNCompilation OH_NNCompilation; + +/** + * @brief Neural Network Runtime的执行器句柄 + * + * @since 9 + * @version 1.0 + */ +typedef struct OH_NNExecutor OH_NNExecutor; + +/** + * @brief 硬件的执行性能模式 + * + * @since 9 + * @version 1.0 + */ +typedef enum { + /** 无性能模式偏好 */ + OH_NN_PERFORMANCE_NONE = 0, + /** 低能耗模式 */ + OH_NN_PERFORMANCE_LOW = 1, + /** 中性能模式 */ + OH_NN_PERFORMANCE_MEDIUM = 2, + /** 高性能模式 */ + OH_NN_PERFORMANCE_HIGH = 3, + /** 极致性能模式 */ + OH_NN_PERFORMANCE_EXTREME = 4 +} OH_NN_PerformanceMode; + +/** + * @brief 模型推理任务优先级 + * + * @since 9 + * @version 1.0 + */ +typedef enum { + /** 无优先级偏好 */ + OH_NN_PRIORITY_NONE = 0, + /** 低优先级 */ + OH_NN_PRIORITY_LOW = 1, + /** 中优先级 */ + OH_NN_PRIORITY_MEDIUM = 2, + /** 高优先级 */ + OH_NN_PRIORITY_HIGH = 3 +} OH_NN_Priority; + +/** + * @brief Neural Network Runtime 定义的错误码类型 + * + * @since 9 + * @version 1.0 + */ +typedef enum { + /** 操作成功 */ + OH_NN_SUCCESS = 0, + /** 操作失败 */ + OH_NN_FAILED = 1, + /** 非法参数 */ + OH_NN_INVALID_PARAMETER = 2, + /** 内存相关的错误,包括:内存不足、内存数据拷贝失败、内存申请失败等。 */ + OH_NN_MEMORY_ERROR = 3, + /** 非法操作 */ + OH_NN_OPERATION_FORBIDDEN = 4, + /** 空指针异常 */ + OH_NN_NULL_PTR = 5, + /** 无效文件 */ + OH_NN_INVALID_FILE = 6, + /** 硬件发生错误,错误可能包含:HDL服务崩溃 */ + OH_NN_UNAVALIDABLE_DEVICE = 7, + /** 非法路径 */ + OH_NN_INVALID_PATH = 8 +} OH_NN_ReturnCode; + +/** + * @brief Neural Network Runtime 融合算子中激活函数的类型 + * + * @since 9 + * @version 1.0 + */ +typedef enum : int8_t { + /** 未指定融合激活函数 */ + OH_NN_FUSED_NONE = 0, + /** 
融合relu激活函数 */ + OH_NN_FUSED_RELU = 1, + /** 融合relu6激活函数 */ + OH_NN_FUSED_RELU6 = 2 +} OH_NN_FuseType; + +/** + * @brief tensor数据的排布类型 + * + * @since 9 + * @version 1.0 + */ +typedef enum { + /** 当tensor没有特定的排布类型时(如标量或矢量),使用{@link OH_NN_FORMAT_NONE} */ + OH_NN_FORMAT_NONE = 0, + /** 读取(使用)维度信息时按照NCHW读取(使用)*/ + OH_NN_FORMAT_NCHW = 1, + /** 读取(使用)维度信息时按照NHWC读取(使用) */ + OH_NN_FORMAT_NHWC = 2 +} OH_NN_Format; + +/** + * @brief Neural Network Runtime 支持的设备类型 + * + * @since 9 + * @version 1.0 + */ +typedef enum { + /** 不属于CPU、GPU、专用加速器的设备 */ + OH_NN_OTHERS = 0, + /** CPU设备 */ + OH_NN_CPU = 1, + /** GPU设备 */ + OH_NN_GPU = 2, + /** 专用硬件加速器 */ + OH_NN_ACCELERATOR = 3, +} OH_NN_DeviceType; + +/** + * @brief Neural Network Runtime 支持的数据类型 + * + * @since 9 + * @version 1.0 + */ +typedef enum { + /** 操作数数据类型未知 */ + OH_NN_UNKNOWN = 0, + /** 操作数数据类型为bool */ + OH_NN_BOOL = 1, + /** 操作数数据类型为int8 */ + OH_NN_INT8 = 2, + /** 操作数数据类型为int16 */ + OH_NN_INT16 = 3, + /** 操作数数据类型为int32 */ + OH_NN_INT32 = 4, + /** 操作数数据类型为int64 */ + OH_NN_INT64 = 5, + /** 操作数数据类型为uint8 */ + OH_NN_UINT8 = 6, + /** 操作数数据类型为uint16 */ + OH_NN_UINT16 = 7, + /** 操作数数据类型为uint32 */ + OH_NN_UINT32 = 8, + /** 操作数数据类型为uint64 */ + OH_NN_UINT64 = 9, + /** 操作数数据类型为float16 */ + OH_NN_FLOAT16 = 10, + /** 操作数数据类型为float32 */ + OH_NN_FLOAT32 = 11, + /** 操作数数据类型为float64 */ + OH_NN_FLOAT64 = 12 +} OH_NN_DataType; + + +/** + * @brief Neural Network Runtime 支持算子的类型 + * + * @since 9 + * @version 1.0 + */ +typedef enum { + /** + * 返回两个输入张量对应元素相加的和的张量。 + * + * 输入: + * + * * x,第一个输入的张量,数据类型要求为布尔值或者数字。 + * * y,第二个输入的张量,数据类型和形状需要和第一个输入保持一致。 + * + * 参数: + * + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * 0 输出x和y的和,数据形状与输入broadcast之后一样,数据类型与较高精度的输入精度一致 + */ + OH_NN_OPS_ADD = 1, + + /** + * 在输入tensor上应用 2D 平均池化,仅支持NHWC格式的tensor。支持int8量化输入。 + * + * 如果输入中含有padMode参数: + * + * 输入: + * + * * x,一个张量。 + * + * 参数: + * + * * kernelSize,用来取平均值的kernel大小,是一个长度为2的int数组[kernel_height,kernel_weight], + * 第一个数表示kernel高度,第二个数表示kernel宽度。 + * * strides,kernel移动的距离,是一个长度为2的int数组[stride_height,stride_weight], + * 第一个数表示高度上的移动步幅,第二个数表示宽度上的移动步幅。 + * * padMode,填充模式,int类型的可选值,0表示same,1表示valid,并且以最近邻的值填充。 + * same,输出的高度和宽度与x相同,填充总数将在水平和垂直方向计算,并在可能的情况下均匀分布到顶部 + * 和底部、左侧和右侧。否则,最后一个额外的填充将从底部和右侧完成。 + * valid,输出的可能最大高度和宽度将在不填充的情况下返回。额外的像素将被丢弃。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 如果输入中含有padList参数: + * + * 输入: + * + * * x,一个张量。 + * + * 参数: + * + * * kernelSize,用来取平均值的kernel大小,是一个长度为2的int数组[kernel_height,kernel_weight], + * 第一个数表示kernel高度,第二个数表示kernel宽度。 + * * strides,kernel移动的距离,是一个长度为2的int数组[stride_height,stride_weight], + * 第一个数表示高度上的移动步幅,第二个数表示宽度上的移动步幅。 + * * padList,输入x周围的填充,是一个长度为4的int数组[top,bottom,left,right],并且以最近邻的值填充。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * 输出x平均池化后的张量。 + */ + OH_NN_OPS_AVG_POOL = 2, + + /** + * 对一个tensor进行batch normalization,对tensor元素进行缩放和位移,缓解一批数据中潜在的covariate shift。 + * + * 输入: + * + * * x,一个n维的tensor,要求形状为[N,...,C],即第n维是通道数(channel)。 + * * scale,缩放因子的1D张量,用于缩放归一化的第一个张量。 + * * offset,用于偏移的1D张量,以移动到归一化的第一个张量。 + * * mean,总体均值的一维张量,仅用于推理;对于训练,必须为空。 + * * variance,用于总体方差的一维张量。仅用于推理;对于训练,必须为空。 + * + * 参数: + * + * * epsilon,数值稳定性的小附加值。 + * + * 输出: + * + * * 输出张量,形状和数据类型与输入x一致。 + */ + OH_NN_OPS_BATCH_NORM = 3, + + /** + * 将一个4维tensor的batch维度按block_shape切分成小块,并将这些小块拼接到空间维度。 + * + * 参数: + * + * * x,输入张量,维将被切分,拼接回空间维度。 + * + * 输出: + * + * * blockSize,一个长度为2的数组[height_block,weight_block],指定切分到空间维度上的block大小。 + * * 
crops,一个shape为(2,2)的2维数组[[crop0_start,crop0_end],[crop1_start,crop1_end]], + * 表示在output的空间维度上截掉部分元素。 + * + * 输出: + * + * * 输出张量,假设x的形状为(n,h,w,c),output的形状为(n',h',w',c'): + * n' = n / (block_shape[0] * block_shape[1]) + * h' = h * block_shape[0] - crops[0][0] - crops[0][1] + * w' = w * block_shape[1] - crops[1][0] - crops[1][1] + * c'= c + */ + OH_NN_OPS_BATCH_TO_SPACE_ND = 4, + + /** + * 对给出的输入张量上的各个维度方向上的数据进行偏置。 + * + * 输入: + * + * * x,输入张量,可为2-5维度。 + * * bias,参数对应输入维度数量的偏移值。 + * + * 输出: + * + * * 输出张量,根据输入中每个维度方向偏移后的结果。 + */ + OH_NN_OPS_BIAS_ADD = 5, + + /** + * 对输入张量中的数据类型进行转换。 + * + * 输入: + * + * * x,输入张量。 + * * type,输入转换目的的数据类型。 + * + * 输出: + * + * * 输出张量,输出转换为目的数据类型后的张量。 + */ + OH_NN_OPS_CAST = 6, + + /** + * 在指定轴上连接张量,将输入张量按给定的轴连接起来。 + * + * 输入: + * + * * x:N个输入张量。 + * + * 参数: + * + * * axis,指定轴的位置。 + * + * 输出: + * + * * 输出n个张量按axis轴连接的结果。 + */ + OH_NN_OPS_CONCAT = 7, + + /** + * 二维卷积层。 + * + * 如果输入中含有padMode参数: + * + * 输入: + * + * * x,输入张量。 + * * weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,inChannel/group], + * inChannel必须要能整除group。 + * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化 + * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。 + * + * 参数: + * + * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。 + * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth], + * 值必须大于或等于1,并且不能超过x的height和width。 + * * padMode,x的填充模式,支持same和valid,int类型,0表示same,1表示valid。 + * same,输出的高度和宽度与x相同,填充总数将在水平和垂直方向计算,并在可能的情况下均匀分布到顶部和底部、左侧 + * 和右侧。否则,最后一个额外的填充将从底部和右侧完成。 + * Valid,输出的可能最大高度和宽度将在不填充的情况下返回。额外的像素将被丢弃。 + * * group,将输入x按in_channel分组,int类型。group等于1,这是常规卷积;group大于1且小于或等于in_channel,这是分组卷积。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * + * 如果输入中含有padList参数: + * + * 输入: + * + * * x,输入张量。 + * * weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,inChannel/group], + * inChannel必须要能整除group。 + * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化 + * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。 + * + * 参数: + * + * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。 + * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth]。 + * 值必须大于或等于1,并且不能超过x的height和width。 + * * padList,输入x周围的填充,是一个长度为4的int数组[top,bottom,left,right]。 + * * group,将输入x按in_channel分组,int类型。 + * group等于1,这是常规卷积。 + * group等于in_channel,这是depthwiseConv2d,此时group==in_channel==out_channel。 + * group大于1且小于in_channel,这是分组卷积,out_channel==group。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * 输出张量,卷积的输出。 + */ + OH_NN_OPS_CONV2D = 8, + + /** + * 二维卷积转置。 + * + * 如果输入中含有padMode参数: + * + * 输入: + * + * * x,输入张量。 + * * weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,inChannel/group], + * inChannel必须要能整除group。 + * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化 + * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。 + * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。 + * + * 参数: + * + * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth]。 + * 值必须大于或等于1,并且不能超过x的height和width。 + * * padMode,x的填充模式,支持same和valid,int类型,0表示same,1表示valid。 + * same,输出的高度和宽度与x相同,填充总数将在水平和垂直方向计算,并在可能的情况下均匀分布到顶部和底部、左侧 + * 和右侧。否则,最后一个额外的填充将从底部和右侧完成。 + * Valid,输出的可能最大高度和宽度将在不填充的情况下返回。额外的像素将被丢弃。 + * * group,将输入x按in_channel分组,int类型。group等于1,这是常规卷积;group大于1且小于或等于in_channel,这是分组卷积。 + * * outputPads,一个整数或元组/2 个整数的列表,指定沿输出张量的高度和宽度的填充量。可以是单个整数,用于为所 + * 有空间维度指定相同的值。沿给定维度的输出填充量必须小于沿同一维度的步幅。 + * * 
activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 如果输入中含有padList参数: + * + * 输入: + * + * * x,输入张量。 + * * weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,inChannel/group], + * inChannel必须要能整除group。 + * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化 + * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。 + * + * 参数: + * + * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。 + * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth]。 + * 值必须大于或等于1,并且不能超过x的height和width。 + * * padList,输入x周围的填充,是一个长度为4的int数组[top,bottom,left,right]。 + * * group,将输入x按in_channel分组,int类型。group等于1,这是常规卷积;group大于1且小于或等于in_channel,这是分组卷积。 + * * outputPads,一个整数或元组/2 个整数的列表,指定沿输出张量的高度和宽度的填充量。可以是单个整数,用于为所 + * 有空间维度指定相同的值。沿给定维度的输出填充量必须小于沿同一维度的步幅。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * 输出张量,卷积转置后的输出。 + */ + OH_NN_OPS_CONV2D_TRANSPOSE = 9, + + /** + * 2维深度可分离卷积 + * + * 如果输入中含有padMode参数: + * + * 输入: + * + * * x,输入张量。 + * * weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,1],outChannel = channelMultiplier x inChannel。 + * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化 + * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。 + * + * 参数: + * + * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。 + * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth]。 + * 值必须大于或等于1,并且不能超过x的height和width。 + * * padMode,x的填充模式,支持same和valid,int类型,0表示same,1表示valid + * same,输出的高度和宽度与x相同,填充总数将在水平和垂直方向计算,并在可能的情况下均匀分布到顶部和底部、左侧 + * 和右侧。否则,最后一个额外的填充将从底部和右侧完成 + * Valid,输出的可能最大高度和宽度将在不填充的情况下返回。额外的像素将被丢弃 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 如果输入中含有padList 参数: + * + * 输入: + * + * * x,输入张量。 + * * weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,1],outChannel = channelMultiplier x inChannel。 + * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化 + * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。 + * + * 参数: + * + * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。 + * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth]。 + * 值必须大于或等于1,并且不能超过x的height和width。 + * * padList,输入x周围的填充,是一个长度为4的int数组[top,bottom,left,right]。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * 输出张量,卷积后的输出。 + */ + OH_NN_OPS_DEPTHWISE_CONV2D_NATIVE = 10, + + /** + * 对输入的两个标量或张量做商。 + * + * 输入: + * + * * x1,第一个输入是标量或布尔值或数据类型为数字或布尔值的张量。 + * * x2,数据类型根据x1的类型,要求有所不同: + * 当第一个输入是张量时,第二个输入可以是实数或布尔值或数据类型为实数/布尔值的张量。 + * 当第一个输入是实数或布尔值时,第二个输入必须是数据类型为实数/布尔值的张量。 + * + * 参数: + * + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * 输出张量,输出两输入相除后的结果。 + */ + OH_NN_OPS_DIV = 11, + + /** + * 设置参数对输入进行product(点乘)、sum(相加减)或max(取大值)。 + * + * 输入: + * + * * x1,第一个输入张量。 + * * x2,第二个输入张量。 + * + * 参数: + * + * * mode,枚举,选择操作方式。 + * + * 输出: + * + * * 输出tensor,与x1有相同的数据类型和形状。 + * + */ + OH_NN_OPS_ELTWISE = 12, + + /** + * 在给定轴上为tensor添加一个额外的维度。 + * + * 输入: + * + * * x,输入张量。 + * * axis,需要添加的维度的index,int32_t类型,值必须在[-dim-1,dim],且只允许常量值。 + * + * 输出: + * + * * 输出tensor,与x有相同的数据类型和形状。 + */ + OH_NN_OPS_EXPAND_DIMS = 13, + + /** + * 根据指定的维度,创建由一个标量填充的张量。 + * + * 输入: + * + * * value,填充的标量。 + * * shape,指定创建张量的维度。 + * + * 输出: + * + * * 输出张量,与value有相同的数据类型,shape由输入指定。 + */ + OH_NN_OPS_FILL = 14, + + /** + * 全连接,整个输入作为feature map,进行特征提取。 + * + * 输入: + * + * * x,全连接的输入张量。 + * * weight,全连接的权重张量。 + * * bias,全连接的偏置,在量化场景下,bias 参数不需要量化参数,其量化 + * 版本要求输入 
OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。 + * + * 参数: + * + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * output,输出运算后的张量。 + + * 如果输入中含有axis参数: + * + * 输入: + * + * * x,全连接的输入张量。 + * * weight,全连接的权重张量。 + * * bias,全连接的偏置,在量化场景下,bias 参数不需要量化参数,其量化 + * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。 + * + * 参数: + * + * * axis,x做全连接的轴,从指定轴axis开始,将axis和axis后面的轴展开成1维去做全连接。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * output,输出运算后的张量。 + */ + OH_NN_OPS_FULL_CONNECTION = 15, + + /** + * 根据指定的索引和轴返回输入tensor的切片。 + * + * 输入: + * + * * x,输入待切片的tensor。 + * * inputIndices,指定输入x在axis上的索引,是一个int类型的数组,值必须在[0,x.shape[axis])范围内 + * * axis,输入x被切片的轴,int32_t类型的数组,数组长度为1。 + * + * 输出: + * + * * Output,输出切片后的tensor。 + */ + OH_NN_OPS_GATHER = 16, + + /** + * 计算输入的Hswish激活值。 + * + * 输入: + * + * * 一个n维输入tensor。 + * + * 输出: + * + * * n维Hswish激活值,数据类型和shape和input一致。 + */ + OH_NN_OPS_HSWISH = 17, + + /** + * 对输入x1和x2,计算每对元素的x<=y的结果。 + * + * 输入: + * + * * x1,可以是实数、布尔值或数据类型是实数/NN_BOOL的tensor。 + * * x2,如果input_x是tensor,input_y可以是实数、布尔值,否则只能是tensor,其数据类型是实数或NN_BOOL。 + * + * 输出: + * + * * Tensor,数据类型为NN_BOOL的tensor,使用量化模型时,output的量化参数不可省略,但量化参数的数值不会对输入结果产生影响。 + */ + OH_NN_OPS_LESS_EQUAL = 18, + + /** + * 计算x1和x2的内积 + * + * 输入: + * + * * x1,n维输入tensor。 + * * x2,n维输入tensor。 + * + * 参数: + * + * * TransposeX,布尔值,是否对x1进行转置。 + * * TransposeY,布尔值,是否对x2进行转置。 + * + * 输出: + * + * * output,计算得到内积,当type!=NN_UNKNOWN时,output数据类型由type决定;当type==NN_UNKNOWN时, + * output的数据类型取决于inputX和inputY进行计算时转化的数据类型。 + */ + OH_NN_OPS_MATMUL = 19, + + /** + * 计算input1和input2对应元素最大值,input1和input2的输入遵守隐式类型转换规则,使数据类型一致。输入必须 + * 是两个张量或一个张量和一个标量。当输入是两个张量时,它们的数据类型不能同时为NN_BOOL。它们的形状支持 + * broadcast成相同的大小。当输入是一个张量和一个标量时,标量只能是一个常数。 + * + * 输入: + * + * * x1,n维输入tensor,实数或NN_BOOL类型。 + * * x2,n维输入tensor,实数或NN_BOOL类型。 + * + * 输出: + * + * * output,n维输出tensor,output的shape和数据类型和两个input中精度或者位数高的相同。 + */ + OH_NN_OPS_MAXIMUM = 20, + + /** + * 在输入tensor上应用 2D 最大值池化。 + * + * 如果输入中含有padMode参数: + * + * 输入: + * + * * x,一个张量。 + * + * 参数: + * + * * kernelSize,用来取最大值的kernel大小,是一个长度为2的int数组[kernel_height,kernel_weight], + * 第一个数表示kernel高度,第二个数表示kernel宽度。 + * * strides,kernel移动的距离,是一个长度为2的int数组[stride_height,stride_weight], + * 第一个数表示高度上的移动步幅,第二个数表示宽度上的移动步幅。 + * * padMode,填充模式,int类型的可选值,0表示same,1表示valid,并且以最近邻的值填充。 + * same,输出的高度和宽度与x相同,填充总数将在水平和垂直方向计算,并在可能的情况下均匀分布到顶部 + * 和底部、左侧和右侧。否则,最后一个额外的填充将从底部和右侧完成。 + * valid,输出的可能最大高度和宽度将在不填充的情况下返回。额外的像素将被丢弃。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 如果输入中含有padList参数: + * + * 输入: + * + * * x,一个张量。 + * + * 参数: + * + * * kernelSize,用来取最大值的kernel大小,是一个长度为2的int数组[kernel_height,kernel_weight], + * 第一个数表示kernel高度,第二个数表示kernel宽度。 + * * strides,kernel移动的距离,是一个长度为2的int数组[stride_height,stride_weight], + * 第一个数表示高度上的移动步幅,第二个数表示宽度上的移动步幅。 + * * padList,输入x周围的填充,是一个长度为4的int数组[top,bottom,left,right],并且以最近邻的值填充。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * output,输出x最大值池化后的张量。 + */ + OH_NN_OPS_MAX_POOL = 21, + + /** + * 将inputX和inputY相同的位置的元素相乘得到output。如果inputX和inputY类型shape不同,要求inputX和inputY可以 + * 通过broadcast扩充成相同的shape进行相乘。 + * + * 输入: + * + * * x1,一个n维tensor。 + * * x2,一个n维tensor。 + * + * 参数: + * + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * output,x1和x2每个元素的乘积。 + */ + OH_NN_OPS_MUL = 22, + + /** + * 根据indices指定的位置,生成一个由one-hot向量构成的tensor。每个onehot向量中的有效值由on_value决定,其他位置由off_value决定。 + * + * 输入: + * + * * indices,n维tensor。indices中每个元素决定每个one-hot向量,on_value的位置 + * * 
depth,一个整型标量,决定one-hot向量的深度。要求depth>0。 + * * on_value,一个标量,指定one-hot向量中的有效值。 + * * off_value,(一个标量,指定one-hot向量中除有效位以外,其他位置的值。 + * + * 参数: + * + * * axis,一个整型标量,指定插入one-hot的维度。 + * indices的形状是[N,C],depth的值是D,当axis=0时,output形状为[D,N,C], + * indices的形状是[N,C],depth的值是D,当axis=-1时,output形状为[N,C,D], + * indices的形状是[N,C],depth的值是D,当axis=1时,output形状为[N,D,C]。 + * + * 输出: + * + * * output,如果indices时n维tensor,则output是(n+1)维tensor。output的形状由indices和axis共同决定。 + */ + OH_NN_OPS_ONE_HOT = 23, + + /** + * 在inputX指定维度的数据前后,添加指定数值进行增广。 + * + * 输入: + * + * * inputX,一个n维tensor,要求inputX的排布为[BatchSize,…]。 + * * paddings,一个2维tensor,指定每一维度增补的长度,shape为[n,2]。paddings[i][0]表示第i维上,需要在inputX前增补的数量; + * paddings[i][1]表示第i维上,需要在inputX后增补的数量。 + * + * 参数: + * + * * padValues,一个常数,数据类型和inputX一致,指定Pad操作补全的数值。 + * + * 输出: + * + * * output,一个n维tensor,维数和数据类型和inputX保持一致。shape由inputX和paddings共同决定 + * output.shape[i] = input.shape[i] + paddings[i][0]+paddings[i][1]。 + */ + OH_NN_OPS_PAD = 24, + + /** + * 求x的y次幂,输入必须是两个tensor或一个tensor和一个标量。当输入是两个tensor时,它们的数据类型不能同时为NN_BOOL, + * 且要求两个tensor的shape相同。当输入是一个tensor和一个标量时,标量只能是一个常数。 + * + * 输入: + * + * * x,实数、bool值或tensor,tensor的数据类型为实数/NN_BOOL。 + * * y,实数、bool值或tensor,tensor的数据类型为实数/NN_BOOL。 + * + * 输出: + * + * * output,形状由x和y broadcast后的形状决定。 + */ + OH_NN_OPS_POW = 25, + + /** + * 给定一个tensor,计算其缩放后的值。 + * + * 输入: + * + * * x,一个n维tensor。 + * * scale,缩放tensor。 + * * bias,偏置tensor。 + * + * 参数: + * + * * axis,指定缩放的维度。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * output,scale的计算结果,一个n维tensor,类型和input一致,shape由axis决定。 + */ + OH_NN_OPS_SCALE = 26, + + /** + * 输入一个tensor,计算其shape。 + * + * 输入: + * + * * x,一个n维tensor。 + * + * 输出: + * + * * output,输出tensor的维度,一个整型数组。 + */ + OH_NN_OPS_SHAPE = 27, + + /** + * 给定一个tensor,计算其sigmoid结果。 + * + * 输入: + * + * * input,一个n维tensor。 + * + * 输出: + * + * * output,sigmoid的计算结果,一个n维tensor,类型和shape和input一致。 + */ + OH_NN_OPS_SIGMOID = 28, + + /** + * 在input tensor各维度,以begin为起点,截取size长度的切片。 + * + * 输入: + * + * * x,n维输入tensor。 + * * begin,一组不小于0的整数,指定每个维度上的起始切分点。 + * * size,一组不小于1的整数,指定每个维度上切片的长度。假设某一维度i,1<=size[i]<=input.shape[i]-begin[i]。 + * + * 输出: + * + * * output,切片得到的n维tensor,其TensorType和input一致,shape和size相同。 + */ + OH_NN_OPS_SLICE = 29, + + /** + * 给定一个tensor,计算其softmax结果。 + * + * 输入: + * + * * x,n维输入tensor。 + * + * 参数: + * + * * axis,int64类型,指定计算softmax的维度。整数取值范围为[-n,n)。 + * + * 输出: + * + * * output,softmax的计算结果,一个n维tensor,类型和shape和x一致。 + */ + OH_NN_OPS_SOFTMAX = 30, + + /** + * 将4维tensor在空间维度上进行切分成blockShape[0] * blockShape[1]个小块,然后在batch维度上拼接这些小块。 + * + * 输入: + * + * * x,一个4维tensor + * + * 参数: + * + * * blockShape,一对整数,每个整数不小于1。 + * * paddings,一对数组,每个数组由两个整数组成。组成paddings的4个整数都不小于0。paddings[0][0]和paddings[0][1]指 + * 定了第三个维度上padding的数量,paddings[1][0]和paddings[1][1]指定了第四个维度上padding的数量。 + * + * 输出: + * + * * output,一个4维tensor,数据类型和input一致。shape由input,blockShape和paddings共同决定,假设input shape为[n,c,h,w],则有 + * output.shape[0] = n * blockShape[0] * blockShape[1] + * output.shape[1] = c + * output.shape[2] = (h + paddings[0][0] + paddings[0][1]) / blockShape[0] + * output.shape[3] = (w + paddings[1][0] + paddings[1][1]) / blockShape[1] + * 要求(h + paddings[0][0] + paddings[0][1])和(w + paddings[1][0] + paddings[1][1])能被 + * blockShape[0]和blockShape[1]整除。 + */ + OH_NN_OPS_SPACE_TO_BATCH_ND = 31, + + /** + * Split 算子沿 axis 维度将 input 拆分成多个 tensor,tensor 数量由 outputNum 指定。 + * + * 输入: + * + * * x,n维tensor。 + * + * 参数: + * + * * outputNum,long,输出tensor的数量,output_num类型为int。 + * * size_splits,1维tensor,指定 tensor 沿 axis 轴拆分后,每个 tensor 
的大小,size_splits 类型为 int。 + * 如果 size_splits 的数据为空,则 tensor 被拆分成大小均等的 tensor,此时要求 input.shape[axis] 可以被 outputNum 整除; + * 如果 size_splits 不为空,则要求 size_splits 所有元素之和等于 input.shape[axis]。 + * * axis,指定拆分的维度,axis类型为int。 + * + * 输出: + * + * * outputs,一组n维tensor,每一个tensor类型和shape相同,每个tensor的类型和input一致。 + */ + OH_NN_OPS_SPLIT = 32, + + /** + * 给定一个tensor,计算其平方根。 + * + * 输入: + * + * * x,一个n维tensor。 + * + * 输出: + * + * * output,输入的平方根,一个n维tensor,类型和shape和input一致。 + */ + OH_NN_OPS_SQRT = 33, + + /** + * 计算两个输入的差值并返回差值的平方。SquaredDifference算子支持tensor和tensor相减。 + * 如果两个tensor的TensorType不相同,Sub算子会将低精度的tensor转成更高精度的类型。 + * 如果两个tensor的shape不同,要求两个tensor可以通过broadcast拓展成相同shape的tensor。 + * + * 输入: + * + * * x,被减数,inputX是一个tensor,tensor的类型可以是NN_FLOAT16、NN_FLOAT32、NN_INT32或NN_BOOL。 + * * y,减数,inputY是一个tensor,tensor的类型可以是NN_FLOAT16、NN_FLOAT32、NN_INT32或NN_BOOL。 + * + * 输出: + * + * * output,两个input差值的平方。output的shape由inputX和inputY共同决定,inputX和inputY的shape相同时, + * output的shape和inputX、inputY相同;shape不同时,需要将inputX或inputY做broadcast操作后,相减得到output。 + * output的TensorType由两个输入中更高精度的TensorType决定。 + */ + OH_NN_OPS_SQUARED_DIFFERENCE = 34, + + /** + * 去除axis中,长度为1的维度。支持int8量化输入假设input的shape为[2,1,1,2,2],axis为[0,1], + * 则output的shape为[2,1,2,2]。第0维到第1维之间,长度为0的维度被去除。 + * + * 输入: + * + * * x,n维tensor。 + * + * 参数: + * + * * axis,指定删除的维度。axis可以是一个int64_t的整数或数组,整数的取值范围为[-n,n)。 + * + * 输出: + * + * * output,输出tensor。 + */ + OH_NN_OPS_SQUEEZE = 35, + + /** + * 将一组tensor沿axis维度进行堆叠,堆叠前每个tensor的维数为n,则堆叠后output维数为n+1。 + * + * 输入: + * + * * x,Stack支持传入多个输入n维tensor,每个tensor要求shape相同且类型相同。 + * + * 参数: + * + * * axis,一个整数,指定tensor堆叠的维度。axis可以是负数,axis取值范围为[-(n+1),(n+1))。 + * + * 输出: + * + * * output,将input沿axis维度堆叠的输出,n+1维tensor,TensorType和input相同。 + */ + OH_NN_OPS_STACK = 36, + + /** + * 跨步截取Tensor + * + * 输入: + * + * * x,n维输入tensor。 + * * begin,1维tensor,begin的长度等于n,begin[i]指定第i维上截取的起点。 + * * end,1维tensor,end的长度等于n,end[i]指定第i维上截取的终点。 + * * strides,1维tensor,strides的长度等于n,strides[i]指定第i维上截取的步长。 + * + * 参数: + * + * * beginMask,一个整数,用于解除begin的限制。将beginMask转成二进制表示,如果binary(beginMask)[i]==1, + * 则对于第i维,从第一个元素开始,以strides[i]为步长截取元素直到第end[i]-1个元素。 + * * endMask,个整数,用于解除end的限制。将endMask转成二进制表示,如果binary(endMask)[i]==1,则对于第i维, + * 从第begin[i]个元素起,以strides[i]为步长截取元素直到tensor边界。 + * * ellipsisMask,一个整数,用于解除begin和end的限制。将ellipsisMask转成二进制表示,如果binary(ellipsisMask)[i]==1, + * 则对于第i维,从第一个元素开始,以strides[i]为补偿,截取元素直到tensor边界。binary(ellipsisMask)仅允许有一位不为0。 + * * newAxisMask,一个整数,用于新增维度。将newAxisMask转成二进制表示,如果binary(newAxisMask)[i]==1,则在第i维插入长度为1的新维度。 + * * shrinkAxisMask,一个整数,用于压缩指定维度。将shrinkAxisMask转成二进制表示,如果binary(shrinkAxisMask)[i]==1, + * 则舍去第i维所有元素,第i维长度压缩至1。 + * + * 输出: + * + * * 堆叠运算后的Tensor,数据类型与x相同。输出维度rank(x[0])+1 维。 + */ + OH_NN_OPS_STRIDED_SLICE = 37, + + /** + * 计算两个输入的差值。 + * + * 输入: + * + * * x,被减数,x是一个tensor。 + * * y,减数,y是一个tensor。 + * + * 参数: + * + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * output,两个input相减的差。output的shape由inputX和inputY共同决定,inputX和inputY的shape相同时,output的shape和inputX、inputY相同; + * shape不同时,需要将inputX或inputY做broadcast操作后,相减得到output。output的TensorType由两个输入中更高精度的TensorType决定。 + */ + OH_NN_OPS_SUB = 38, + + /** + * 计算输入tensor的双曲正切值。 + * + * 输入: + * + * * x,n维tensor。 + * + * 输出: + * + * * output,input的双曲正切,TensorType和tensor shape和input相同。 + */ + OH_NN_OPS_TANH = 39, + + /** + * 以multiples指定的次数拷贝input。 + * + * 输入: + * * x,n维tensor。 + * * multiples,1维tensor,指定各个维度拷贝的次数。其长度m不小于input的维数n。 + * + * 输出: + * * Tensor,m维tensor,TensorType与input相同。如果input和multiples长度相同, + * 
则output和input维数一致,都是n维tensor;如果multiples长度大于n,则用1填充input的维度, + * 再在各个维度上拷贝相应的次数,得到m维tensor。 + */ + OH_NN_OPS_TILE = 40, + + /** + * 根据permutation对input 0进行数据重排。 + * + * 输入: + * + * * x,n维tensor,待重排的tensor。 + * * perm,1维tensor,其长度和input 0的维数一致。 + * + * 输出: + * + * * output,n维tensor,output 0的TensorType与input 0相同,shape由input 0的shape和permutation共同决定。 + */ + OH_NN_OPS_TRANSPOSE = 41, + + /** + * keepDims为false时,计算指定维度上的平均值,减少input的维数;当keepDims为true时,计算指定维度上的平均值,保留相应的维度。 + * + * 输入: + * + * * input,n维输入tensor,n<8。 + * * axis,1维tensor,指定计算均值的维度,axis中每个元素的取值范围为[-n,n)。 + * + * 参数: + * + * * keepDims,布尔值,是否保留维度的标志位。 + * + * 输出: + * + * * output,m维输出tensor,数据类型和input相同。当keepDims为false时,m==n;当keepDims为true时,m=2,则要求inputX的排布为[BatchSize,…,Channels],第二个维度为通道数。 + * * weight,一个1维tensor。weight的长度只能是1或者等于通道数。当weight长度为1,则inputX所有通道共享一个权重值。 + * 若weight长度等于通道数,每个通道独享一个权重,若inputX维数n<2,weight长度只能为1。 + * 输出: + * + * output,x的PReLU激活值。形状和数据类型和inputX保持一致。 + */ + OH_NN_OPS_PRELU = 46, + + /** + * 计算input的Relu激活值。 + * + * 输入: + * + * * input,一个n维输入tensor。 + * + * 输出: + * + * * output,n维Relu输出tensor,数据类型和shape和input一致。 + */ + OH_NN_OPS_RELU = 47, + + /** + * 计算input的Relu6激活值,即对input中每个元素x,计算min(max(x,0),6)。 + * + * 输入: + * + * * input,一个n维输入tensor。 + * + * 输出: + * + * * output,n维Relu6输出tensor,数据类型和shape和input一致。 + */ + OH_NN_OPS_RELU6 = 48, + + /** + * 对一个tensor从某一axis开始做层归一化。 + * + * 输入: + * + * * input,一个n维输入tensor。 + * * gamma,一个m维tensor,gamma维度应该与input做归一化部分的shape一致。 + * * beta,一个m维tensor,shape与gamma一样。 + * + * 参数: + * + * * beginAxis,是一个NN_INT32的标量,指定开始做归一化的轴,取值范围是[1,rank(input))。 + * * epsilon,是一个NN_FLOAT32的标量,是归一化公式中的微小量,常用值是1e-7。 + * + * 输出: + * + * * output,n维输出tensor,数据类型和shape和input一致。 + */ + OH_NN_OPS_LAYER_NORM = 49, + + /** + * 当keepDims为false时,过乘以维度中的所有元素来减小张量的维度,减少input的维数;当keepDims为true时,过乘以维度中的所有元素来减小张量的维度,保留相应的维度。 + * + * 输入: + * + * * input,n维输入tensor,n<8。 + * * axis,1维tensor,指定计算乘的维度,axis中每个元素的取值范围为[-n,n)。 + * + * 参数: + * + * * keepDims,布尔值,是否保留维度的标志位。 + * + * 输出: + * + * * output,m维输出tensor,数据类型和input相同。当keepDims为false时,m==n;当keepDims为true时,m +#include +#include + +#include "interfaces/kits/c/neural_network_runtime_type.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +// ALLOCATE_BUFFER_LIMIT is 1 Gb +const size_t ALLOCATE_BUFFER_LIMIT = 1024 * 1024 * 1024; +enum DeviceStatus: int { + UNKNOWN, + AVAILABLE, + BUSY, + OFFLINE +}; + +struct ModelConfig { + bool enableFloat16; + OH_NN_PerformanceMode mode; + OH_NN_Priority priority; +}; + +struct ModelBuffer { + void* buffer; + size_t length; +}; + +struct QuantParam { + uint32_t numBits; + double scale; + int32_t zeroPoint; +}; + +struct IOTensor { + std::string name; + OH_NN_DataType dataType; + OH_NN_Format format; + std::vector dimensions; + void* data; + size_t length; +}; +} // NeuralNetworkRuntime +} // OHOS + +#endif // NEURAL_NETWORK_RUNTIME_OEM_CPP_API_TYPE_H \ No newline at end of file diff --git a/interfaces/oem/cpp_api/device.h b/interfaces/oem/cpp_api/device.h new file mode 100644 index 0000000000000000000000000000000000000000..93415e4bb527e26049cba0563cc3e86bcfa4b148 --- /dev/null +++ b/interfaces/oem/cpp_api/device.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_DEVICE_H +#define NEURAL_NETWORK_RUNTIME_DEVICE_H + +#include +#include +#include + +#include "interfaces/kits/c/neural_network_runtime_type.h" +#include "cpp_type.h" +#include "prepared_model.h" +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +class Device { +public: + Device() = default; + virtual ~Device() = default; + + virtual OH_NN_ReturnCode GetDeviceName(std::string& name) = 0; + virtual OH_NN_ReturnCode GetVendorName(std::string& name) = 0; + virtual OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) = 0; + virtual OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) = 0; + virtual OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, + std::vector& ops) = 0; + + virtual OH_NN_ReturnCode IsFloat16PrecisionSupported(bool& isSupported) = 0; + virtual OH_NN_ReturnCode IsPerformanceModeSupported(bool& isSupported) = 0; + virtual OH_NN_ReturnCode IsPrioritySupported(bool& isSupported) = 0; + virtual OH_NN_ReturnCode IsDynamicInputSupported(bool& isSupported) = 0; + virtual OH_NN_ReturnCode IsModelCacheSupported(bool& isSupported) = 0; + + virtual OH_NN_ReturnCode PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) = 0; + virtual OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, + std::shared_ptr& preparedModel) = 0; + + virtual void* AllocateBuffer(size_t length) = 0; + virtual OH_NN_ReturnCode ReleaseBuffer(const void* buffer) = 0; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_DEVICE_H \ No newline at end of file diff --git a/interfaces/oem/cpp_api/device_registrar.h b/interfaces/oem/cpp_api/device_registrar.h new file mode 100644 index 0000000000000000000000000000000000000000..9d3c8329bc28b6cdf111c9029c0436af8f718ebc --- /dev/null +++ b/interfaces/oem/cpp_api/device_registrar.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_DEVICE_REGISTRAR_H +#define NEURAL_NETWORK_RUNTIME_DEVICE_REGISTRAR_H + +#include +#include +#include + +#include "interfaces/oem/cpp_api/device.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +using CreateDevice = std::function()>; + +class DeviceRegistrar { +public: + DeviceRegistrar(const CreateDevice creator); + ~DeviceRegistrar() = default; +}; + +#define REGISTER_DEVICE(deviceName, vendorName, creator) \ + namespace { \ + static OHOS::NeuralNetworkRuntime::DeviceRegistrar g_##deviceName##_##vendorName##_device_registrar(creator) \ + } // namespace +} // namespace NeuralNetworkRuntime +} // OHOS +#endif // NEURAL_NETWORK_RUNTIME_DEVICE_REGISTRAR_H \ No newline at end of file diff --git a/interfaces/oem/cpp_api/prepared_model.h b/interfaces/oem/cpp_api/prepared_model.h new file mode 100644 index 0000000000000000000000000000000000000000..65741311a999d456487091e11fc54e2f2c4641b1 --- /dev/null +++ b/interfaces/oem/cpp_api/prepared_model.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_PREPARED_MODEL_H +#define NEURAL_NETWORK_RUNTIME_PREPARED_MODEL_H + +#include + +#include "interfaces/kits/c/neural_network_runtime_type.h" +#include "cpp_type.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +class PreparedModel { +public: + PreparedModel() = default; + virtual ~PreparedModel() = default; + + virtual OH_NN_ReturnCode ExportModelCache(std::vector& modelCache) = 0; + + virtual OH_NN_ReturnCode Run(const std::vector& inputs, + const std::vector& outputs, + std::vector>& outputsDims, + std::vector& isOutputBufferEnough) = 0; +}; +} // OHOS +} // namespace NeuralNetworkRuntime +#endif // NEURAL_NETWORK_RUNTIME_PREPARED_MODEL_H \ No newline at end of file diff --git a/neural-network-runtime-guidelines.md b/neural-network-runtime-guidelines.md new file mode 100644 index 0000000000000000000000000000000000000000..bf191bc414aeda56b259ba844a6bc72bea5d1372 --- /dev/null +++ b/neural-network-runtime-guidelines.md @@ -0,0 +1,460 @@ +# Neural Network Runtime开发指导 + +## 场景介绍 + +Neural Network Runtime作为AI推理引擎和加速芯片的桥梁,为AI推理引擎提供精简的Native接口,满足推理引擎通过加速芯片执行端到端推理的需求;同时为加速芯片提供了统一的HDI接口,使能加速芯片接入OpenHarmony社区生态。 + +## 环境准备 + +### 环境要求 + +Neural Network Runtime部件的环境要求如下: + +- 系统版本:OpenHarmony 3.2及以上。 +- 开发环境:Ubuntu 18.04及以上。 +- 接入设备:OpenHarmony定义的标准设备,并且系统中内置的硬件加速器驱动,已通过HDI接口对接Neural Network Runtime。 + +由于Neural Network Runtime通过OpenHarmony Native API对外开放,需要通过OpenHarmony的Native开发套件编译Neural Network Runtime应用。在社区的[每日构建](http://ci.openharmony.cn/dailys/dailybuilds)下载对应系统版本的ohos-sdk压缩包,从压缩包中提取对应平台的Native开发套件。以Linux为例,Native开发套件的压缩包命名为`native-linux-{版本号}.zip`。 + +### 环境搭建 + +1. 打开Ubuntu编译服务器的终端。 +2. 把下载好的Native开发套件压缩包拷贝至当前用户根目录下。 +3. 
执行以下命令解压Native开发套件的压缩包。 +```shell +unzip native-linux-{版本号}.zip +``` + +解压缩后的内容如下(随版本迭代,目录下的内容可能发生变化,请以最新版本的Native API为准): +```text +native/ +├── build // 交叉编译工具链 +├── build-tools // 编译构建工具 +├── docs +├── llvm +├── nativeapi_syscap_config.json +├── ndk_system_capability.json +├── NOTICE.txt +├── oh-uni-package.json +└── sysroot // Native API头文件和库 +``` +## Neural Network Runtime接口 + +详细的Neural Network Runtime接口文档请参考: +- [neural_network_runtime.h](./interfaces/kits/c/neural_network_runtime.h) +- [neural_network_runtime_type.h](./interfaces/kits/c/neural_network_runtime_type.h) + +## Neural Network Runtime开发指导 + +### 开发步骤 + +Neural Network Runtime的开发流程主要包含**模型构造**、**模型编译**和**推理执行**三个阶段。以下开发步骤以`Add`单算子模型为例,介绍调用Neural Network Runtime接口,开发应用的过程。 + +1. 创建应用样例文件。 + + 首先,创建Neural Network Runtime应用样例的源文件。在项目目录下执行以下命令,创建`nnrt_example/`目录,在目录下创建 `nnrt_example.cpp` 源文件。 + + ```shell + mkdir ~/nnrt_example && cd ~/nnrt_example + touch nnrt_example.cpp + ``` + +2. 导入Neural Network Runtime。 + + 在 nnrt_example.cpp` 文件的开头添加以下代码,引入Neural Network Runtime接口。 + + ```cpp + #include + #include + #include + + #include "neural_network_runtime/neural_network_runtime.h" + + const size_t DATA_LENGTH = 4 * 12; // 输出、输出的字节长度 + ``` + +3. 构造模型。 + + 以下图所示的`Add`单算子模型为例,使用Neural Network Runtime构图模块,构造样例模型。`Add`算子有两个输入、一个参数和一个输出,其中的参数用于指定`Add`的激活类型。 + + !["Add单算子网络示意图"](neural_network_runtime_add_op_model.png) + + ```cpp + OH_NN_ReturnCode BuildModel(OH_NNModel** pModel) + { + // 创建模型实例,调用构图模块的接口,进行模型构图 + OH_NNModel* model = OH_NNModel_Construct(); + if (model == nullptr) { + std::cout << "Create model failed." << std::endl; + return OH_NN_MEMORY_ERROR; + } + + // 添加Add算子的第一个输入Tensor,类型为float32,张量形状为[1, 2, 2, 3] + int32_t inputDims[4] = {1, 2, 2, 3}; + OH_NN_Tensor input1 = {OH_NN_FLOAT32, 4, inputDims, nullptr, OH_NN_TENSOR}; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, &input1); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed, add Tensor of first input failed." << std::endl + return ret; + } + + // 添加Add算子的第二个输入Tensor,类型为float32,张量形状为[1, 2, 2, 3] + OH_NN_Tensor input2 = {OH_NN_FLOAT32, 4, inputDims, nullptr, OH_NN_TENSOR}; + ret = OH_NNModel_AddTensor(model, &input2); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed, add Tensor of second input failed." << std::endl; + return ret; + } + + // 添加Add算子唯一一个参数Tensor,激活函数类型,其数据类型为int8,是一个标量。 + int32_t activationDims = 1; + int8_t activationValue = OH_NN_FUSED_NONE; + OH_NN_Tensor activation = {OH_NN_INT8, 1, &activationDims, nullptr, OH_NN_ADD_ACTIVATIONTYPE}; + ret = OH_NNModel_AddTensor(model, &activation); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed, add Tensor of activation failed." << std::endl; + return ret; + } + + // 将激活函数类型设置为“无激活函数”。 + ret = OH_NNModel_SetTensorData(model, 2, &activationValue, sizeof(int8_t)); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed, set value of activation failed." << std::endl; + return ret; + } + + // 设置Add算子的输出,类型为float32,张量形状为[1, 2, 2, 3] + OH_NN_Tensor output = {OH_NN_FLOAT32, 4, inputDims, nullptr, OH_NN_TENSOR}; + ret = OH_NNModel_AddTensor(model, &output); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed, add Tensor of output failed." 
<< std::endl; + return ret; + } + + // 指定Add算子的输入、参数和输出索引 + uint32_t inputIndicesValues[2] = {0, 1}; + uint32_t paramIndicesValues = 2; + uint32_t outputIndicesValues = 3; + OH_NN_UInt32Array paramIndices = {¶mIndicesValues, 1}; + OH_NN_UInt32Array inputIndices = {inputIndicesValues, 2}; + OH_NN_UInt32Array outputIndices = {&outputIndicesValues, 1}; + + // 向模型实例添加Add算子 + ret = OH_NNModel_AddOperation(model, OH_NN_OPS_ADD, ¶mIndices, &inputIndices, &outputIndices); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed, add operation failed." << std::endl; + return ret; + } + + // 设置模型实例的输入、输出索引 + ret = OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed, specify inputs and outputs failed." << std::endl; + return ret; + } + + // 完成模型实例的构建 + ret = OH_NNModel_Finish(model); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed, error happened when finishing model construction." << std::endl; + return ret; + } + + *pModel = model; + return OH_NN_SUCCESS; + } + ``` + +4. 查询Neural Network Runtime已经对接的加速芯片。 + + 通过HDI接口,Neural Network Runtime支持对接多种加速芯片。在执行模型编译前,需要查询当前设备下,Neural Network Runtime已经对接的加速芯片。每个加速芯片对应唯一的ID值,在编译阶段需要通过设备ID,指定模型编译的芯片。 + ```cpp + void GetAvailableDevices(std::vector& availableDevice) + { + availableDevice.clear(); + + // 获取可用的硬件ID + const size_t* devices = nullptr; + uint32_t deviceCount = 0; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&devices, &deviceCount); + if (ret != OH_NN_SUCCESS) { + std::cout << "GetAllDevicesID failed, get no available device." << std::endl; + return; + } + + for (uint32_t i = 0; i < deviceCount; i++) { + availableDevice.emplace_back(devices[i]); + } + } + ``` + +5. 在指定的设备上编译模型。 + + Neural Network Runtime使用抽象的模型表达描述AI模型的拓扑结构,在加速芯片上执行前,需要通过Neural Network Runtime提供的编译模块,将抽象的模型表达下发至芯片驱动层,转换成可以直接推理计算的格式。 + ```cpp + OH_NN_ReturnCode CreateCompilation(OH_NNModel* model, const std::vector& availableDevice, OH_NNCompilation** pCompilation) + { + // 创建编译实例,用于将模型传递至底层硬件编译 + OH_NNCompilation* compilation = OH_NNCompilation_Construct(model); + if (compilation == nullptr) { + std::cout << "CreateCompilation failed, error happended when creating compilation." << std::endl; + return OH_NN_MEMORY_ERROR; + } + + // 设置编译的硬件、缓存路径、性能模式、计算优先级、是否开启float16低精度计算等选项 + + OH_NN_ReturnCode ret = OH_NNCompilation_SetDevice(compilation, availableDevice[0]); // 选择在第一个设备上编译模型 + if (ret != OH_NN_SUCCESS) { + std::cout << "CreateCompilation failed, error happened when setting device." << std::endl; + return ret; + } + + // 将模型编译结果缓存在/data/local/tmp目录下,版本号指定为1 + ret = OH_NNCompilation_SetCache(compilation, "/data/local/tmp", 1); + if (ret != OH_NN_SUCCESS) { + std::cout << "CreateCompilation failed, error happened when setting cache path." << std::endl; + return ret; + } + + ret = OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_MEDIUM); // 选择中等的性能模式 + if (ret != OH_NN_SUCCESS) { + std::cout << "CreateCompilation failed, error happened when setting performance mode." << std::endl; + return ret; + } + + ret = OH_NNCompilation_EnableFloat16(compilation, true); // 如果设备支持Float16低精度推理,则开启Float16精度推理 + if (ret != OH_NN_SUCCESS) { + std::cout << "CreateCompilation failed, error happened when enable float16 computation." << std::endl; + return ret; + } + + // 完成编译设置,进行模型编译 + ret = OH_NNCompilation_Build(compilation); + if (ret != OH_NN_SUCCESS) { + std::cout << "CreateCompilation failed, error happened when building compilation." 
<< std::endl; + return ret; + } + + *pCompilation = compilation; + return OH_NN_SUCCESS; + } + ``` + +6. 创建执行器。 + + 完成模型编译后,需要调用Neural Network Runtime的执行模块,创建推理执行器。执行阶段,设置模型输入、获取模型输出和触发推理计算的操作均围绕执行器完成。 + ```cpp + OH_NNExecutor* CreateExecutor(OH_NNCompilation* compilation) + { + // 创建执行实例 + OH_NNExecutor* executor = OH_NNExecutor_Construct(compilation); + return executor; + } + ``` + +7. 执行推理计算,并打印计算结果。 + + 通过执行模块提供的接口,将推理计算所需要的输入数据传递给执行器,触发执行器完成一次推理计算,获取模型的推理计算结果。 + ```cpp + OH_NN_ReturnCode Run(OH_NNExecutor* executor) + { + // 构造示例数据 + float input1[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; + float input2[12] = {11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22}; + + int32_t inputDims[4] = {1, 2, 2, 3}; + OH_NN_Tensor inputTensor1 = {OH_NN_FLOAT32, 4, inputDims, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor inputTensor2 = {OH_NN_FLOAT32, 4, inputDims, nullptr, OH_NN_TENSOR}; + + // 设置执行的输入 + + // 设置执行的第一个输入,输入数据由input1指定 + OH_NN_ReturnCode ret = OH_NNExecutor_SetInput(executor, 0, &inputTensor1, input1, DATA_LENGTH); + if (ret != OH_NN_SUCCESS) { + std::cout << "Run failed, error happened when setting first input." << std::endl; + return ret; + } + + // 设置执行的第二个输入,输入数据由input2指定 + ret = OH_NNExecutor_SetInput(executor, 1, &inputTensor2, input2, DATA_LENGTH); + if (ret != OH_NN_SUCCESS) { + std::cout << "Run failed, error happened when setting second input." << std::endl; + return ret; + } + + // 设置输出的数据缓冲区,OH_NNExecutor_Run执行计算后,输出结果将保留在output中 + float output[12]; + ret = OH_NNExecutor_SetOutput(executor, 0, output, DATA_LENGTH); + if (ret != OH_NN_SUCCESS) { + std::cout << "Run failed, error happened when setting output buffer." << std::endl; + return ret; + } + + // 执行计算 + ret = OH_NNExecutor_Run(executor); + if (ret != OH_NN_SUCCESS) { + std::cout << "Run failed, error doing execution." << std::endl; + return ret; + } + + // 打印输出结果 + for (uint32_t i = 0; i < 12; i++) { + std::cout << "Output index: " << i << ", value is: " << output[i] << "." << std::endl; + } + + return OH_NN_SUCCESS; + } + ``` + +8. 构建端到端构图-编译-执行流程。 + + 步骤3-步骤7实现了模型的构图、编译和执行流程,并封装成4个函数,便于模块化开发。以下示例代码将4个函数串联成完整的Neural Network Runtime开发流程。 + ```cpp + int main() + { + OH_NNModel* model = nullptr; + OH_NNCompilation* compilation = nullptr; + OH_NNExecutor* executor = nullptr; + std::vector availableDevices; + + // 模型构图阶段 + OH_NN_ReturnCode ret = BuildModel(&model); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed." << std::endl; + OH_NNModel_Destroy(&model); + return -1; + } + + // 获取 + GetAvailableDevices(availableDevices); + if (availableDevices.empty()) { + std::cout << "No available device." << std::endl; + OH_NNModel_Destroy(&model); + return -1; + } + + // 模型编译阶段 + ret = CreateCompilation(model, availableDevices, &compilation); + if (ret != OH_NN_SUCCESS) { + std::cout << "CreateCompilation failed." << std::endl; + OH_NNModel_Destroy(&model); + OH_NNCompilation_Destroy(&compilation); + return -1; + } + + // 创建模型的推理执行器 + executor = CreateExecutor(compilation); + if (executor == nullptr) { + std::cout << "CreateExecutor failed, no executor is created." << std::endl; + OH_NNModel_Destroy(&model); + OH_NNCompilation_Destroy(&compilation); + return -1; + } + + // 使用上一步创建的执行器,执行单步推理计算 + ret = Run(executor); + if (ret != OH_NN_SUCCESS) { + std::cout << "Run failed." 
<< std::endl; + OH_NNModel_Destroy(&model); + OH_NNCompilation_Destroy(&compilation); + OH_NNExecutor_Destroy(&executor); + return -1; + } + + // 释放申请的资源 + OH_NNModel_Destroy(&model); + OH_NNCompilation_Destroy(&compilation); + OH_NNExecutor_Destroy(&executor); + + return 0; + } + ``` + +## 调测验证 + +1. 准备应用样例的编译配置文件。 + + 新建一个 `CMakeLists.txt` 文件,为开发步骤中的应用样例文件 `nnrt_example.cpp` 添加编译配置。以下提供简单的 `CMakeLists.txt` 示例: + ```text + cmake_minimum_required(VERSION 3.16) + project(nnrt_example C CXX) + + add_executable(nnrt_example + ./nnrt_example.cpp + ) + + target_link_libraries(nnrt_example + neural_network_runtime.z + ) + ``` + +2. 编译应用样例。 + + 执行以下命令,在当前目录下新建build/目录,在build/目录下编译 `nnrt_example.cpp`,得到二进制文件 `nnrt_example`。 + ```shell + mkdir build && cd build + cmake -DCMAKE_TOOLCHAIN_FILE={交叉编译工具链的路径}/build/cmake/ohos.toolchain.cmake -DOHOS_ARCH=arm64-v8a -DOHOS_PLATFORM=OHOS -DOHOS_STL=c++_static .. + make . + ``` + +3. 执行以下代码,将样例推送到设备上执行。 + ```shell + # 将编译得到的 `nnrt_example` 推送到设备上,执行样例。 + hdc_std file send ./nnrt_example /data/local/tmp/. + + # 给测试用例可执行文件加上权限。 + hdc_std shell "chmod +x /data/local/tmp/nnrt_example" + + # 执行测试用例 + hdc_std shell "/data/local/tmp/nnrt_example" + ``` + + > **说明:** 如果样例执行正常,应该得到以下输出。 + ```text + Output index: 0, value is: 11.000000. + Output index: 1, value is: 13.000000. + Output index: 2, value is: 15.000000. + Output index: 3, value is: 17.000000. + Output index: 4, value is: 19.000000. + Output index: 5, value is: 21.000000. + Output index: 6, value is: 23.000000. + Output index: 7, value is: 25.000000. + Output index: 8, value is: 27.000000. + Output index: 9, value is: 29.000000. + Output index: 10, value is: 31.000000. + Output index: 11, value is: 33.000000. + ``` + +4. 检查Cache(可选) + + 如果在调测环境下,Neural Network Runtime对接的HDI服务支持Cache功能,执行完 `nnrt_example`, 可以在 `/data/local/tmp` 目录下 + 找到生成的缓存文件。 + + > **Cache功能说明:** 模型的IR需要传递到硬件驱动层,由HDI服务将统一的IR图,编译成硬件专用的计算图,编译的过程非常耗时。Neural Network Runtime支持 + 计算图缓存的特性,可以将HDI服务编译生成的计算图,缓存到设备存储中。当下一次在同一个加速芯片上编译同一个模型时,通过指定缓存的路径, + Neural Network Runtime可以直接加载缓存文件中的计算图,减少编译消耗的时间。 + + ```shell + ls /data/local/tmp + ``` + + 以下为打印结果 + ```text + # 0.nncache cache_info.nncache + ``` + + 如果缓存不再使用,需要手动删除缓存,可以参考以下命令。 + ```shell + rm /data/local/tmp/*nncache + ``` + +### 相关实例 + +针对Neural Network Runtime应用开发,可以参考一下实例: +- [Tensorflow Lite对接Neural Network Runtime](./example/deep_learning_framework/README_zh.md) + + +## 相关仓 + +- [HDF Framework](https://gitee.com/openharmony/drivers_hdf_core) +- [Mindspore](https://gitee.com/openharmony/third_party_mindspore) diff --git a/neural_network_runtime_add_op_model.png b/neural_network_runtime_add_op_model.png new file mode 100644 index 0000000000000000000000000000000000000000..6b2eaa15def968551cf7cf97a9c48882b79c0cdf Binary files /dev/null and b/neural_network_runtime_add_op_model.png differ diff --git a/neural_network_runtime_intro.png b/neural_network_runtime_intro.png new file mode 100644 index 0000000000000000000000000000000000000000..8f58413de5337ae7fb98aca126efe6f5fca2cefc Binary files /dev/null and b/neural_network_runtime_intro.png differ diff --git a/test/system_test/BUILD.gn b/test/system_test/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..dc68b1bbf08f15f9aa9453d519fc778e42006516 --- /dev/null +++ b/test/system_test/BUILD.gn @@ -0,0 +1,107 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build/test.gni") + +module_output_path = "neural_network_runtime/" + +config("system_test_config") { + visibility = [ ":*" ] + + include_dirs = [ + "//foundation/ai/neural_network_runtime", + "//third_party/mindspore/mindspore/lite/mindir/include" + ] + + cflags = [ + "-Wall", + "-Wextra", + "-Werror", + ] +} + +ohos_systemtest("DeviceTest") { + module_out_path = module_output_path + sources = ["//foundation/ai/neural_network_runtime/test/system_test/device_test.cpp"] + + configs = [ ":system_test_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "hiviewdfx_hilog_native:libhilog", + "c_utils:utils", + "hitrace_native:hitrace_meter", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "hdf_core:libhdf_utils", + ] +} + +ohos_systemtest("End2EndTest") { + module_out_path = module_output_path + sources = [ + "//foundation/ai/neural_network_runtime/test/system_test/end_to_end_test.cpp", + "//foundation/ai/neural_network_runtime/test/system_test/common/nnrt_test.cpp", + ] + + configs = [ ":system_test_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "hiviewdfx_hilog_native:libhilog", + "c_utils:utils", + "hitrace_native:libhitracechain", + "hitrace_native:hitrace_meter", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "hdf_core:libhdf_utils", + "mindspore:mindir" + ] +} + +ohos_systemtest("StressTest") { + module_out_path = module_output_path + sources = [ + "//foundation/ai/neural_network_runtime/test/system_test/stress_test.cpp", + "//foundation/ai/neural_network_runtime/test/system_test/common/nnrt_test.cpp", + ] + + configs = [ ":system_test_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "hiviewdfx_hilog_native:libhilog", + "c_utils:utils", + "mindspore:mindir" + ] +} + +group("system_test") { + testonly = true + deps = [ + ":DeviceTest", + ":End2EndTest", + ":StressTest" + ] +} + diff --git a/test/system_test/common/nnrt_test.cpp b/test/system_test/common/nnrt_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a431b2a498608856972c4d09de40aed757c9efa8 --- /dev/null +++ b/test/system_test/common/nnrt_test.cpp @@ -0,0 +1,234 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nnrt_test.h" + +#include "securec.h" + +#include "common/log.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace SystemTest { +namespace { +std::unique_ptr TransformQuantParam(const CppQuantParam& cppQuantParam) +{ + // cppQuantParam.numBits empty means no quantization is applied, return nullptr directly. + if (cppQuantParam.numBits.empty()) { + return nullptr; + } + + std::unique_ptr quantParam = std::make_unique(); + quantParam->numBits = cppQuantParam.numBits.data(); + quantParam->quantCount = cppQuantParam.numBits.size(); + quantParam->scale = cppQuantParam.scale.data(); + quantParam->zeroPoint = cppQuantParam.zeroPoint.data(); + return quantParam; +} + +OH_NN_UInt32Array TransformUInt32Array(const std::vector& vector) +{ + uint32_t* data = (vector.empty()) ? nullptr : const_cast(vector.data()); + return {data, vector.size()}; +} +} // Anonymous namespace + +// AddTensors() expects tensors do not destruct and free before the test case end. +OH_NN_ReturnCode NNRtTest::AddTensors(const std::vector& cppTensors) +{ + OH_NN_Tensor tensor; + OH_NN_ReturnCode status{OH_NN_SUCCESS}; + for (const CppTensor& cppTensor : cppTensors) { + tensor = { + .dataType = cppTensor.dataType, + .dimensionCount = static_cast(cppTensor.dimensions.size()), + .dimensions = cppTensor.dimensions.empty() ? nullptr : cppTensor.dimensions.data(), + .type = cppTensor.type + }; + + const CppQuantParam& cppQuantParam = cppTensor.quantParam; + if ((cppQuantParam.numBits.size() != cppQuantParam.scale.size()) + || (cppQuantParam.scale.size() != cppQuantParam.zeroPoint.size())) { + LOGE("NNRtTest::AddTensors failed, get different number of numBits, scales and zeroPoints."); + return OH_NN_INVALID_PARAMETER; + } + // If no quantization is applied, quantParam == nullptr and no need to check. 
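+        // The OH_NN_QuantParam only borrows the numBits/scale/zeroPoint buffers from cppTensor;
+        // the struct itself is moved into m_quantParams below so it stays alive until the test case ends.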
+ std::unique_ptr quantParam = TransformQuantParam(cppQuantParam); + tensor.quantParam = quantParam.get(); + + m_tensors.emplace_back(tensor); + m_quantParams.emplace_back(std::move(quantParam)); + + status = OH_NNModel_AddTensor(m_model, &tensor); + if (status != OH_NN_SUCCESS) { + LOGE("NNRtTest::AddTensors failed, error happens when adding tensor."); + m_tensors.clear(); + m_quantParams.clear(); + return status; + } + + if (cppTensor.data != nullptr) { + uint32_t index = m_tensors.size() - 1; + status = OH_NNModel_SetTensorData(m_model, index, cppTensor.data, cppTensor.dataLength); + if (status != OH_NN_SUCCESS) { + LOGE("NNRtTest::AddTensors failed, error happens when setting value."); + m_tensors.clear(); + m_quantParams.clear(); + return status; + } + } + } + + return status; +} + +OH_NN_ReturnCode NNRtTest::AddOperation(OH_NN_OperationType opType, + const std::vector& paramIndices, + const std::vector& inputIndices, + const std::vector& outputIndices) +{ + const OH_NN_UInt32Array params = TransformUInt32Array(paramIndices); + const OH_NN_UInt32Array inputs = TransformUInt32Array(inputIndices); + const OH_NN_UInt32Array outputs = TransformUInt32Array(outputIndices); + + OH_NN_ReturnCode status = OH_NNModel_AddOperation(m_model, opType, ¶ms, &inputs, &outputs); + if (status == OH_NN_SUCCESS) { + Node node = { + .opType = opType, + .inputs = inputIndices, + .outputs = outputIndices, + .params = paramIndices + }; + m_nodes.emplace_back(node); + } + + return status; +} + +OH_NN_ReturnCode NNRtTest::SpecifyInputAndOutput(const std::vector& inputIndices, + const std::vector& outputIndices) +{ + const OH_NN_UInt32Array inputs = TransformUInt32Array(inputIndices); + const OH_NN_UInt32Array outputs = TransformUInt32Array(outputIndices); + + OH_NN_ReturnCode status = OH_NNModel_SpecifyInputsAndOutputs(m_model, &inputs, &outputs); + if (status == OH_NN_SUCCESS) { + m_inputs = inputIndices; + m_outputs = outputIndices; + } + + return status; +} + +OH_NN_ReturnCode NNRtTest::SetInput(uint32_t index, + const std::vector& dimensions, + const void* buffer, + size_t length) +{ + OH_NN_Tensor tensor = m_tensors[m_inputs[index]]; + tensor.dimensions = dimensions.data(); + + return OH_NNExecutor_SetInput(m_executor, index, &tensor, buffer, length); +} + +OH_NN_ReturnCode NNRtTest::SetOutput(uint32_t index, void* buffer, size_t length) +{ + return OH_NNExecutor_SetOutput(m_executor, index, buffer, length); +} + +OH_NN_ReturnCode NNRtTest::SetInputFromMemory(uint32_t index, + const std::vector& dimensions, + const void* buffer, + size_t length, + OH_NN_Memory** pMemory) +{ + if (buffer == nullptr) { + LOGE("NNRtTest::SetInputFromMemory failed, passed nullptr to buffer."); + return OH_NN_INVALID_PARAMETER; + } + + if (pMemory == nullptr) { + LOGE("NNRtTest::SetInputFromMemory failed, passed nullptr to pMemory."); + return OH_NN_INVALID_PARAMETER; + } + + OH_NN_Memory* memory = OH_NNExecutor_AllocateInputMemory(m_executor, index, length); + if (memory == nullptr) { + LOGE("NNRtTest::SetInputFromMemory failed, error happened when creating input memory."); + return OH_NN_MEMORY_ERROR; + } + + OH_NN_Tensor tensor = m_tensors[m_inputs[index]]; + tensor.dimensions = dimensions.data(); + + OH_NN_ReturnCode status = OH_NNExecutor_SetInputWithMemory(m_executor, index, &tensor, memory); + if (status != OH_NN_SUCCESS) { + LOGE("NNRtTest::SetInputFromMemory failed, error happened when setting input."); + OH_NNExecutor_DestroyInputMemory(m_executor, index, &memory); + } + + errno_t error_code = 
memcpy_s(const_cast(memory->data), memory->length, buffer, length); + if (error_code != EOK) { + LOGE("NNRtTest::SetInputFromMemory failed, error happens when copying data to OH_NN_Memory. Error code: %d.", + error_code); + OH_NNExecutor_DestroyInputMemory(m_executor, index, &memory); + return OH_NN_MEMORY_ERROR; + } + + *pMemory = memory; + return status; +} + +OH_NN_ReturnCode NNRtTest::SetOutputFromMemory(uint32_t index, size_t length, OH_NN_Memory** pMemory) +{ + if (pMemory == nullptr) { + LOGE("NNRtTest::SetOutputFromMemory failed, passed nullptr to pMemory."); + return OH_NN_INVALID_PARAMETER; + } + + OH_NN_Memory* memory = OH_NNExecutor_AllocateOutputMemory(m_executor, index, length); + if (memory == nullptr) { + LOGE("NNRtTest::SetOutputFromMemory failed, error happened when creating output memory."); + return OH_NN_MEMORY_ERROR; + } + + OH_NN_ReturnCode status = OH_NNExecutor_SetOutputWithMemory(m_executor, index, memory); + if (status != OH_NN_SUCCESS) { + LOGE("NNRtTest::SetOutputFromMemory failed, error happened when setting output."); + OH_NNExecutor_DestroyOutputMemory(m_executor, index, &memory); + } + + *pMemory = memory; + return status; +} + +OH_NN_ReturnCode NNRtTest::GetDevices() +{ + const size_t* devicesID{nullptr}; + uint32_t count{0}; + OH_NN_ReturnCode status = OH_NNDevice_GetAllDevicesID(&devicesID, &count); + if (status != OH_NN_SUCCESS) { + LOGE("NNRtTest::GetDevices failed, get all devices ID failed."); + return status; + } + + for (uint32_t i = 0; i < count; i++) { + m_devices.emplace_back(devicesID[i]); + } + return OH_NN_SUCCESS; +} +} // namespace SystemTest +} // NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/test/system_test/common/nnrt_test.h b/test/system_test/common/nnrt_test.h new file mode 100644 index 0000000000000000000000000000000000000000..a117096d1bb71d7836dc7a4c95ed995283b351be --- /dev/null +++ b/test/system_test/common/nnrt_test.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_SYSTEM_TEST_NNRT_TEST +#define NEURAL_NETWORK_RUNTIME_SYSTEM_TEST_NNRT_TEST + +#include +#include +#include +#include + +#include "interfaces/kits/c/neural_network_runtime.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace SystemTest { +struct CppQuantParam { + std::vector numBits; + std::vector scale; + std::vector zeroPoint; +}; + +struct CppTensor { + OH_NN_DataType dataType{OH_NN_UNKNOWN}; + std::vector dimensions; + void* data{nullptr}; + size_t dataLength{0}; + CppQuantParam quantParam; + OH_NN_TensorType type{OH_NN_TENSOR}; +}; + +struct Node { + OH_NN_OperationType opType; + std::vector inputs; + std::vector outputs; + std::vector params; +}; + +class NNRtTest : public testing::Test { +public: + virtual OH_NN_ReturnCode AddTensors(const std::vector& cppTensors); + virtual OH_NN_ReturnCode AddOperation(OH_NN_OperationType opType, + const std::vector& paramIndices, + const std::vector& inputIndices, + const std::vector& outputIndices); + virtual OH_NN_ReturnCode SpecifyInputAndOutput(const std::vector& inputIndices, + const std::vector& outputIndices); + virtual OH_NN_ReturnCode SetInput(uint32_t index, + const std::vector& dimensions, + const void* buffer, + size_t length); + virtual OH_NN_ReturnCode SetOutput(uint32_t index, void* buffer, size_t length); + virtual OH_NN_ReturnCode SetInputFromMemory(uint32_t index, + const std::vector& dimensions, + const void* buffer, + size_t length, + OH_NN_Memory** pMemory); + virtual OH_NN_ReturnCode SetOutputFromMemory(uint32_t index, size_t length, OH_NN_Memory** pMemory); + virtual OH_NN_ReturnCode GetDevices(); + +protected: + OH_NNModel* m_model{nullptr}; + OH_NNCompilation* m_compilation{nullptr}; + OH_NNExecutor* m_executor{nullptr}; + + std::vector m_tensors; + std::vector> m_quantParams; + std::vector m_nodes; + std::vector m_inputs; + std::vector m_outputs; + std::vector m_devices; +}; +} // namespace SystemTest +} // NeuralNetworkRuntime +} // OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SYSTEM_TEST_NNRT_TEST \ No newline at end of file diff --git a/test/system_test/device_test.cpp b/test/system_test/device_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0fe24dee1a339de55efe56c4ff658d27959941a5 --- /dev/null +++ b/test/system_test/device_test.cpp @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +#include "interfaces/kits/c/neural_network_runtime.h" + +using namespace testing; +using namespace testing::ext; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace SystemTest { +class DeviceTest : public testing::Test { +public: + void SetUp() {} + void TearDown() {} + +public: + std::string m_deviceName {"RK3568-CPU_Rockchip"}; + size_t m_deviceId {std::hash{}("RK3568-CPU_Rockchip")}; + OH_NN_DeviceType m_deviceType {OH_NN_CPU}; +}; + +/* + * @tc.name: device_001 + * @tc.desc: Get all devices id successfully. 
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_001, testing::ext::TestSize.Level1) +{ + const size_t* allDeviceIds = nullptr; + uint32_t count {0}; + + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&allDeviceIds, &count); + EXPECT_EQ(OH_NN_SUCCESS, ret); + + uint32_t expectCount = 1; + EXPECT_EQ(expectCount, count); + EXPECT_EQ(m_deviceId, *allDeviceIds); +} + +/* + * @tc.name: device_002 + * @tc.desc: Get all devices id with nullptr deviceId parameter. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_002, testing::ext::TestSize.Level1) +{ + uint32_t count {0}; + + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(nullptr, &count); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_003 + * @tc.desc: Get all devices id with nullptr count parameter. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_003, testing::ext::TestSize.Level1) +{ + const size_t* allDeviceIds = nullptr; + + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&allDeviceIds, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_004 + * @tc.desc: Get all devices id with not nullptr deviceId pointer. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_004, testing::ext::TestSize.Level1) +{ + const size_t allDeviceIds = 0; + const size_t* pAllDeviceIds = &allDeviceIds; + uint32_t count {0}; + + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&pAllDeviceIds, &count); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_005 + * @tc.desc: Get device name successfully. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_005, testing::ext::TestSize.Level1) +{ + const char* name = nullptr; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(m_deviceId, &name); + EXPECT_EQ(OH_NN_SUCCESS, ret); + std::string sName(name); + EXPECT_EQ(m_deviceName, sName); +} + +/* + * @tc.name: device_006 + * @tc.desc: Get device name with invalid deviceId. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_006, testing::ext::TestSize.Level1) +{ + const size_t deviceId = 0; + const char* name = nullptr; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceId, &name); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: device_007 + * @tc.desc: Get device name without nullptr name pointer. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_007, testing::ext::TestSize.Level1) +{ + const size_t deviceId = 0; + const char* name = "name"; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceId, &name); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_008 + * @tc.desc: Get device name with nullptr name parameter. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_008, testing::ext::TestSize.Level1) +{ + const size_t deviceId = 0; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceId, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_009 + * @tc.desc: Get device type successfully. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_009, testing::ext::TestSize.Level1) +{ + OH_NN_DeviceType type {OH_NN_OTHERS}; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(m_deviceId, &type); + EXPECT_EQ(OH_NN_SUCCESS, ret); + EXPECT_EQ(m_deviceType, type); +} + +/* + * @tc.name: device_010 + * @tc.desc: Get device type with invalid deviceId. 
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_010, testing::ext::TestSize.Level1) +{ + const size_t deviceId = 0; + OH_NN_DeviceType type {OH_NN_OTHERS}; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceId, &type); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_011 + * @tc.desc: Get device type with nullptr type. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_011, testing::ext::TestSize.Level1) +{ + const size_t deviceId = 0; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceId, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} +} // namespace SystemTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/system_test/end_to_end_test.cpp b/test/system_test/end_to_end_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c668209096723b8b7b99b4d0ead8887e3b728ea5 --- /dev/null +++ b/test/system_test/end_to_end_test.cpp @@ -0,0 +1,617 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "end_to_end_test.h" + +#include +#include +#include +#include +#include + +#include "securec.h" + +#include "common/log.h" +#include "interfaces/kits/c/neural_network_runtime.h" + +namespace fs = std::filesystem; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace SystemTest { +const float INPUT_ONE = 1.23; +const float INPUT_TWO = 2.34; +const float EXPECTED_OUTPUT = 5.91; +const int8_t EXPECTED_QUANT_OUTPUT = 10; +const float EPSILON = 1e-4; +const uint32_t NO_DEVICE_COUNT = 0; +const int32_t ELEMENT_COUNT = 12; +const uint32_t ADDEND_DATA_LENGTH = ELEMENT_COUNT * sizeof(float); +const std::string CACHE_DIR = "/data/local/tmp/nnrt_st_cache"; +const uint32_t CACHE_VERSION = 1; +const int REPEAT_TIMES = 100; + +// End2EndTest build a model with two connected add operations. 
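+// Tensor indices follow the order of the tensors vector: 0 and 1 are the addends, 2 is the shared
+// activation parameter, 3 holds the intermediate sum of the first Add, and 4 is the final output.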
+OH_NN_ReturnCode End2EndTest::BuildModel(const std::vector& tensors) +{ + m_model = OH_NNModel_Construct(); + if (m_model == nullptr) { + LOGE("End2EndTest::BuildModel failed, error happens when creating OH_NNModel."); + return OH_NN_MEMORY_ERROR; + } + + OH_NN_ReturnCode status = AddTensors(tensors); + if (status != OH_NN_SUCCESS) { + LOGE("End2EndTest::BuildModel failed, error happens when adding tensors."); + return status; + } + + status = AddOperation(OH_NN_OPS_ADD, {2}, {0, 1}, {3}); + if (status != OH_NN_SUCCESS) { + LOGE("End2EndTest::BuildModel failed, error happends when adding first Add operation into the model."); + return status; + } + + status = AddOperation(OH_NN_OPS_ADD, {2}, {3, 1}, {4}); + if (status != OH_NN_SUCCESS) { + LOGE("End2EndTest::BuildModel failed, error happends when adding second Add operation into the model."); + return status; + } + + status = SpecifyInputAndOutput({0, 1}, {4}); + if (status != OH_NN_SUCCESS) { + LOGE("End2EndTest::BuildModel failed, error happends when specifying the inputs and outputs."); + return status; + } + + status = OH_NNModel_Finish(m_model); + if (status != OH_NN_SUCCESS) { + LOGE("End2EndTest::BuildModel failed, error happends during constructing the model."); + return status; + } + + return status; +} + +OH_NN_ReturnCode End2EndTest::IsExpectedOutput(const float* outputBuffer) +{ + if (outputBuffer == nullptr) { + LOGE("End2EndTest::IsExpectedOutput failed, pass nullptr to outputBuffer."); + return OH_NN_INVALID_PARAMETER; + } + + for (int i = 0; i < ELEMENT_COUNT; i++) { + LOGI("Comparing inference output with expected value, output index: %d, output value: %f, " + "expected value: %f.", i, outputBuffer[i], EXPECTED_OUTPUT); + if (std::abs(outputBuffer[i] - EXPECTED_OUTPUT) > EPSILON) { + return OH_NN_FAILED; + } + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode End2EndTest::IsExpectedOutput(const OH_NN_Memory* outputMemory) +{ + if (outputMemory == nullptr) { + LOGE("End2EndTest::IsExpectedOutput failed, pass nullptr to outputMemory."); + return OH_NN_INVALID_PARAMETER; + } + + if (outputMemory->length == 0) { + LOGE("End2EndTest::IsExpectedOutput failed, outputMemory is empty."); + return OH_NN_FAILED; + } + + float* output = static_cast(const_cast(outputMemory->data)); + return IsExpectedOutput(output); +} + +/* + * @tc.name: end_to_end_test_001 + * @tc.desc: Test End-to-End operation of Neural Network Runtime. + * @tc.type: FUNC + */ +HWTEST_F(End2EndTest, end_to_end_test_001, testing::ext::TestSize.Level1) +{ + // Prepare tensors + int8_t activationValue{0}; + CppQuantParam quantParam{{}, {}, {}}; + CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE}; + CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + std::vector tensors{addend1, addend2, activation, immediateTensor, output}; + + ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors)); + + m_compilation = OH_NNCompilation_Construct(m_model); + ASSERT_NE(nullptr, m_compilation); + OH_NNModel_Destroy(&m_model); + ASSERT_EQ(nullptr, m_model); + + ASSERT_EQ(OH_NN_SUCCESS, GetDevices()); + ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator. 
+ size_t targetDevice = m_devices[0]; // Use the first device in system test. + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation)); + + m_executor = OH_NNExecutor_Construct(m_compilation); + ASSERT_NE(nullptr, m_executor); + OH_NNCompilation_Destroy(&m_compilation); + ASSERT_EQ(nullptr, m_compilation); + + // Set value of firstAddend + std::vector firstAddendValue(ELEMENT_COUNT, INPUT_ONE); + ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set value of secondAddend + std::vector secondAddendValue(ELEMENT_COUNT, INPUT_TWO); + ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set output buffer of output + float outputBuffer[ELEMENT_COUNT]; + ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH)); + + // Run inference and assert output value + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor)); + ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputBuffer)); + + OH_NNExecutor_Destroy(&m_executor); + ASSERT_EQ(nullptr, m_executor); +} + +/* + * @tc.name: end_to_end_test_002 + * @tc.desc: Test End-to-End operation of Neural Network Runtime using OH_NN_Memory + * @tc.type: FUNC + */ +HWTEST_F(End2EndTest, end_to_end_test_002, testing::ext::TestSize.Level1) +{ + // Prepare tensors + int8_t activationValue{0}; + CppQuantParam quantParam{{}, {}, {}}; + CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE}; + CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + std::vector tensors{addend1, addend2, activation, immediateTensor, output}; + + ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors)); + + m_compilation = OH_NNCompilation_Construct(m_model); + ASSERT_NE(nullptr, m_compilation); + OH_NNModel_Destroy(&m_model); + ASSERT_EQ(nullptr, m_model); + + ASSERT_EQ(OH_NN_SUCCESS, GetDevices()); + ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator. + size_t targetDevice = m_devices[0]; // Use the first device in system test. 
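+    // Bind the compilation to the selected device before building the device-specific graph.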
+ ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation)); + + m_executor = OH_NNExecutor_Construct(m_compilation); + ASSERT_NE(nullptr, m_executor); + OH_NNCompilation_Destroy(&m_compilation); + ASSERT_EQ(nullptr, m_compilation); + + // Set value of firstAddend + std::vector firstAddendValue(ELEMENT_COUNT, INPUT_ONE); + OH_NN_Memory* firstAddendMemory; + ASSERT_EQ(OH_NN_SUCCESS, + SetInputFromMemory(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH, &firstAddendMemory)); + + // Set value of secondAddend + std::vector secondAddendValue(ELEMENT_COUNT, INPUT_TWO); + OH_NN_Memory* secondAddendMemory; + ASSERT_EQ(OH_NN_SUCCESS, + SetInputFromMemory(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH, &secondAddendMemory)); + + // Set output buffer of output + OH_NN_Memory* outputMemory; + ASSERT_EQ(OH_NN_SUCCESS, SetOutputFromMemory(0, ADDEND_DATA_LENGTH, &outputMemory)); + + // Run inference and assert output value + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor)); + ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputMemory)); + + OH_NNExecutor_DestroyInputMemory(m_executor, 0, &firstAddendMemory); + ASSERT_EQ(nullptr, firstAddendMemory); + OH_NNExecutor_DestroyInputMemory(m_executor, 1, &secondAddendMemory); + ASSERT_EQ(nullptr, secondAddendMemory); + OH_NNExecutor_DestroyOutputMemory(m_executor, 0, &outputMemory); + ASSERT_EQ(nullptr, outputMemory); + + OH_NNExecutor_Destroy(&m_executor); + ASSERT_EQ(nullptr, m_executor); +} + +/* + * @tc.name: end_to_end_test_003 + * @tc.desc: Test End-to-End operation of Neural Network Runtime with dynamic inputs. + * @tc.type: FUNC + */ +HWTEST_F(End2EndTest, end_to_end_test_003, testing::ext::TestSize.Level1) +{ + // Prepare tensors + int8_t activationValue{0}; + CppQuantParam quantParam{{}, {}, {}}; + std::vector value(ELEMENT_COUNT, INPUT_ONE); + CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, (void*)value.data(), ADDEND_DATA_LENGTH, quantParam, OH_NN_TENSOR}; + CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE}; + CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + std::vector tensors{addend1, addend2, activation, immediateTensor, output}; + + m_model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, m_model); + ASSERT_EQ(OH_NN_SUCCESS, AddTensors(tensors)); + ASSERT_EQ(OH_NN_SUCCESS, AddOperation(OH_NN_OPS_ADD, {2}, {0, 1}, {3})); + ASSERT_EQ(OH_NN_SUCCESS, AddOperation(OH_NN_OPS_ADD, {2}, {3, 1}, {4})); + ASSERT_EQ(OH_NN_SUCCESS, SpecifyInputAndOutput({1}, {4})); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_Finish(m_model)); + + m_compilation = OH_NNCompilation_Construct(m_model); + ASSERT_NE(nullptr, m_compilation); + OH_NNModel_Destroy(&m_model); + ASSERT_EQ(nullptr, m_model); + + ASSERT_EQ(OH_NN_SUCCESS, GetDevices()); + ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator. + size_t targetDevice = m_devices[0]; // Use the first device in system test. 
+ ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation)); + + m_executor = OH_NNExecutor_Construct(m_compilation); + ASSERT_NE(nullptr, m_executor); + OH_NNCompilation_Destroy(&m_compilation); + ASSERT_EQ(nullptr, m_compilation); + + // Set value of secondAddend + std::vector secondAddendValue(ELEMENT_COUNT, INPUT_TWO); + ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set output buffer of output + float outputBuffer[ELEMENT_COUNT]; + ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH)); + + // Run inference and assert output value + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor)); + ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputBuffer)); + + OH_NNExecutor_Destroy(&m_executor); + ASSERT_EQ(nullptr, m_executor); +} + +/* + * @tc.name: end_to_end_test_004 + * @tc.desc: Test End-to-End operation of Neural Network Runtime. + * @tc.type: FUNC + */ +HWTEST_F(End2EndTest, end_to_end_test_004, testing::ext::TestSize.Level1) +{ + // Prepare tensors + int8_t activationValue{0}; + CppQuantParam quantParam{{}, {}, {}}; + CppTensor addend1{OH_NN_FLOAT32, {-1, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE}; + CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + std::vector tensors{addend1, addend2, activation, immediateTensor, output}; + + ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors)); + + m_compilation = OH_NNCompilation_Construct(m_model); + ASSERT_NE(nullptr, m_compilation); + OH_NNModel_Destroy(&m_model); + ASSERT_EQ(nullptr, m_model); + + ASSERT_EQ(OH_NN_SUCCESS, GetDevices()); + ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator. + size_t targetDevice = m_devices[0]; // Use the first device in system test. + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation)); + + m_executor = OH_NNExecutor_Construct(m_compilation); + ASSERT_NE(nullptr, m_executor); + OH_NNCompilation_Destroy(&m_compilation); + ASSERT_EQ(nullptr, m_compilation); + + // Set value of firstAddend + std::vector firstAddendValue(ELEMENT_COUNT, INPUT_ONE); + ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set value of secondAddend + std::vector secondAddendValue(ELEMENT_COUNT, INPUT_TWO); + ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set output buffer of output + float outputBuffer[ELEMENT_COUNT]; + ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH)); + + // Run inference and assert output value + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor)); + ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputBuffer)); + + OH_NNExecutor_Destroy(&m_executor); + ASSERT_EQ(nullptr, m_executor); +} + +/* + * @tc.name: end_to_end_test_005 + * @tc.desc: Test End-to-End execution with cache setting and loading. 
+ * @tc.type: FUNC + */ +HWTEST_F(End2EndTest, end_to_end_test_005, testing::ext::TestSize.Level1) +{ + // Prepare tensors + int8_t activationValue{0}; + CppQuantParam quantParam{{}, {}, {}}; + CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE}; + CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + std::vector tensors{addend1, addend2, activation, immediateTensor, output}; + ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors)); + + ASSERT_EQ(OH_NN_SUCCESS, GetDevices()); + ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator. + size_t targetDevice = m_devices[0]; // Use the first device in system test. + + // Used to export cache. + OH_NNCompilation* compilationCacheExporter = OH_NNCompilation_Construct(m_model); + ASSERT_NE(nullptr, compilationCacheExporter); + + const fs::path cachePath{CACHE_DIR}; + ASSERT_EQ(false, fs::exists(cachePath)); + ASSERT_EQ(true, fs::create_directory(cachePath)); + + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(compilationCacheExporter, targetDevice)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetCache(compilationCacheExporter, CACHE_DIR.c_str(), CACHE_VERSION)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(compilationCacheExporter)); + ASSERT_EQ(false, fs::is_empty(cachePath)); + OH_NNCompilation_Destroy(&compilationCacheExporter); + ASSERT_EQ(nullptr, compilationCacheExporter); + + // This compilation loads cache. + m_compilation = OH_NNCompilation_Construct(m_model); + ASSERT_NE(nullptr, m_compilation); + OH_NNModel_Destroy(&m_model); + ASSERT_EQ(nullptr, m_model); + + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetCache(m_compilation, CACHE_DIR.c_str(), CACHE_VERSION)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation)); + + m_executor = OH_NNExecutor_Construct(m_compilation); + ASSERT_NE(nullptr, m_executor); + OH_NNCompilation_Destroy(&m_compilation); + ASSERT_EQ(nullptr, m_compilation); + + // Set value of firstAddend + std::vector firstAddendValue(ELEMENT_COUNT, INPUT_ONE); + ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set value of secondAddend + std::vector secondAddendValue(ELEMENT_COUNT, INPUT_TWO); + ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set output buffer of output + float outputBuffer[ELEMENT_COUNT]; + ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH)); + + // Run inference and assert output value + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor)); + ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputBuffer)); + + OH_NNExecutor_Destroy(&m_executor); + ASSERT_EQ(nullptr, m_executor); + + // If cache directory and files and delete, remove_all() should return a value larger than 1. + // The actual value depends on the implementation of NNRt service. + ASSERT_GT(fs::remove_all(cachePath), (std::uintmax_t)1); +} + +/* + * @tc.name: end_to_end_test_006 + * @tc.desc: Test End-to-End execution mixing SetInput and SetInputFromMemory functions. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(End2EndTest, end_to_end_test_006, testing::ext::TestSize.Level1)
+{
+    // Prepare tensors
+    int8_t activationValue{0};
+    CppQuantParam quantParam{{}, {}, {}};
+    CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
+    CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
+    CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE};
+    CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
+    CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
+    std::vector<CppTensor> tensors{addend1, addend2, activation, immediateTensor, output};
+
+    ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors));
+
+    ASSERT_EQ(OH_NN_SUCCESS, GetDevices());
+    ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator.
+    size_t targetDevice = m_devices[0]; // Use the first device in system test.
+
+    // Construct the compilation from the model.
+    m_compilation = OH_NNCompilation_Construct(m_model);
+    ASSERT_NE(nullptr, m_compilation);
+    OH_NNModel_Destroy(&m_model);
+    ASSERT_EQ(nullptr, m_model);
+
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation));
+
+    m_executor = OH_NNExecutor_Construct(m_compilation);
+    ASSERT_NE(nullptr, m_executor);
+    OH_NNCompilation_Destroy(&m_compilation);
+    ASSERT_EQ(nullptr, m_compilation);
+
+    // Set value of firstAddend
+    std::vector<float> firstAddendValue(ELEMENT_COUNT, INPUT_ONE);
+    ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH));
+
+    // Set value of secondAddend
+    std::vector<float> secondAddendValue(ELEMENT_COUNT, INPUT_TWO);
+    OH_NN_Memory* secondAddendMemory;
+    ASSERT_EQ(OH_NN_SUCCESS,
+        SetInputFromMemory(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH, &secondAddendMemory));
+
+    // Set output buffer of output
+    OH_NN_Memory* outputMemory;
+    ASSERT_EQ(OH_NN_SUCCESS, SetOutputFromMemory(0, ADDEND_DATA_LENGTH, &outputMemory));
+
+    // Run inference and assert output value
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor));
+    ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputMemory));
+
+    OH_NNExecutor_DestroyInputMemory(m_executor, 1, &secondAddendMemory);
+    ASSERT_EQ(nullptr, secondAddendMemory);
+    OH_NNExecutor_DestroyOutputMemory(m_executor, 0, &outputMemory);
+    ASSERT_EQ(nullptr, outputMemory);
+
+    OH_NNExecutor_Destroy(&m_executor);
+    ASSERT_EQ(nullptr, m_executor);
+}
+
+/*
+ * @tc.name: end_to_end_test_007
+ * @tc.desc: Test End-to-End operation of Neural Network Runtime with quantization.
+ * @tc.type: FUNC + */ +HWTEST_F(End2EndTest, end_to_end_test_007, testing::ext::TestSize.Level1) +{ + // Prepare tensors + int8_t activationValue{0}; + CppQuantParam quantParam{{}, {}, {}}; + CppQuantParam quantParam1{{8}, {0.2}, {0}}; + CppQuantParam quantParam2{{8}, {0.4}, {0}}; + CppTensor addend1{OH_NN_INT8, {3, 2, 2}, nullptr, 0, quantParam1, OH_NN_TENSOR}; + CppTensor addend2{OH_NN_INT8, {3, 2, 2}, nullptr, 0, quantParam1, OH_NN_TENSOR}; + CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE}; + CppTensor immediateTensor{OH_NN_INT8, {3, 2, 2}, nullptr, 0, quantParam1, OH_NN_TENSOR}; + CppTensor output{OH_NN_INT8, {3, 2, 2}, nullptr, 0, quantParam2, OH_NN_TENSOR}; + std::vector tensors{addend1, addend2, activation, immediateTensor, output}; + + ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors)); + + m_compilation = OH_NNCompilation_Construct(m_model); + ASSERT_NE(nullptr, m_compilation); + OH_NNModel_Destroy(&m_model); + ASSERT_EQ(nullptr, m_model); + + ASSERT_EQ(OH_NN_SUCCESS, GetDevices()); + ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator. + size_t targetDevice = m_devices[0]; // Use the first device in system test. + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation)); + + m_executor = OH_NNExecutor_Construct(m_compilation); + ASSERT_NE(nullptr, m_executor); + OH_NNCompilation_Destroy(&m_compilation); + ASSERT_EQ(nullptr, m_compilation); + + // Set value of firstAddend + std::vector firstAddendValue(ELEMENT_COUNT, 4); + ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set value of secondAddend + std::vector secondAddendValue(ELEMENT_COUNT, 8); + ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set output buffer of output + int8_t outputBuffer[ELEMENT_COUNT]; + ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH)); + + // Run inference and assert output value + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor)); + for (int i = 0; i < ELEMENT_COUNT; i++) { + printf("Comparing output with expected value, output index: %d, output value: %d, expected value: %d.", + i, static_cast(outputBuffer[i]), static_cast(EXPECTED_QUANT_OUTPUT)); + ASSERT_EQ(outputBuffer[i], EXPECTED_QUANT_OUTPUT); + } + + OH_NNExecutor_Destroy(&m_executor); + ASSERT_EQ(nullptr, m_executor); +} + +/* + * @tc.name: end_to_end_test_008 + * @tc.desc: Test End-to-End operation of Neural Network Runtime by calling OH_NNExecutor_Run multiple times. 
+ * @tc.type: FUNC + */ +HWTEST_F(End2EndTest, end_to_end_test_008, testing::ext::TestSize.Level1) +{ + // Prepare tensors + int8_t activationValue{0}; + CppQuantParam quantParam{{}, {}, {}}; + CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE}; + CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + std::vector tensors{addend1, addend2, activation, immediateTensor, output}; + + ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors)); + + m_compilation = OH_NNCompilation_Construct(m_model); + ASSERT_NE(nullptr, m_compilation); + OH_NNModel_Destroy(&m_model); + ASSERT_EQ(nullptr, m_model); + + ASSERT_EQ(OH_NN_SUCCESS, GetDevices()); + ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator. + size_t targetDevice = m_devices[0]; // Use the first device in system test. + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation)); + + m_executor = OH_NNExecutor_Construct(m_compilation); + ASSERT_NE(nullptr, m_executor); + OH_NNCompilation_Destroy(&m_compilation); + ASSERT_EQ(nullptr, m_compilation); + + std::vector firstAddendValue(ELEMENT_COUNT, INPUT_ONE); + std::vector secondAddendValue(ELEMENT_COUNT, INPUT_TWO); + float outputBuffer[ELEMENT_COUNT]; + + // Test inference multiple times. + for (int i = 0; i < REPEAT_TIMES; i++) { + + // Set value of firstAddend + ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set value of secondAddend + ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set output buffer of output + ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH)); + + // Run inference and assert output value + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor)); + ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputBuffer)); + } + + OH_NNExecutor_Destroy(&m_executor); + ASSERT_EQ(nullptr, m_executor); +} +} // namespace SystemTest +} // NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/test/system_test/end_to_end_test.h b/test/system_test/end_to_end_test.h new file mode 100644 index 0000000000000000000000000000000000000000..7255bcebb8758e2c5acee29770d1b2d4f6ea6791 --- /dev/null +++ b/test/system_test/end_to_end_test.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef SYSTEM_TEST_END_TO_END_TEST +#define SYSTEM_TEST_END_TO_END_TEST + +#include +#include +#include + +#include "interfaces/kits/c/neural_network_runtime.h" +#include "test/system_test/common/nnrt_test.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace SystemTest { +class End2EndTest : public NNRtTest { +public: + End2EndTest() = default; + + OH_NN_ReturnCode BuildModel(const std::vector& tensors); + OH_NN_ReturnCode IsExpectedOutput(const float* outputBuffer); + OH_NN_ReturnCode IsExpectedOutput(const OH_NN_Memory* outputMemory); +}; +} // namespace SystemTest +} // NeuralNetworkRuntime +} // OHOS + +#endif // SYSTEM_TEST_END_TO_END_TEST \ No newline at end of file diff --git a/test/system_test/stress_test.cpp b/test/system_test/stress_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..21344faf1e334f606e439f294285101a3ef44b5d --- /dev/null +++ b/test/system_test/stress_test.cpp @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +#include "securec.h" + +#include "test/system_test/common/nnrt_test.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace SystemTest { +constexpr int TMP_LENGTH = 32; +constexpr int PATH_LENGTH = 255; +constexpr int STRESS_COUNT = 10000000; +const float EPSILON = 1e-4; +const uint32_t NO_DEVICE_COUNT = 0; +const uint32_t ADDEND_DATA_LENGTH = 12 * sizeof(float); +const std::string VMRSS = "VmSize:"; + +class StressTest : public NNRtTest { +public: + StressTest() = default; +}; + +std::string GetVMRSS(pid_t pid) +{ + std::string fileName{"/proc/"}; + fileName += std::to_string(pid) + "/status"; + std::ifstream ifs(fileName, std::ios::binary); + if (!ifs.is_open()) { + std::cout << "Failed to open " << fileName << std::endl; + return ""; + } + + std::string vmRss; + // Extract physical memory use from process status. + while (!ifs.eof()) { + getline(ifs, vmRss); + // Compare the first seven characters, which is "VmSize:". 
+        if (vmRss.compare(0, 7, VMRSS) == 0) {
+            break;
+        }
+    }
+    ifs.close();
+
+    time_t t = time(nullptr);
+    char tmp[TMP_LENGTH] {' '};
+    strftime(&(tmp[1]), TMP_LENGTH * sizeof(char), "%Y-%m-%d %H:%M:%S", localtime(&t));
+
+    return vmRss + tmp;
+}
+
+void PrintVMRSS(pid_t pid)
+{
+    char path[PATH_LENGTH];
+    if (!getcwd(path, PATH_LENGTH)) {
+        std::cout << "Failed to get current path" << std::endl;
+        return;
+    }
+    std::string pathStr = path;
+    std::string pathFull = pathStr + "/RealtimeVMRSS_" + std::to_string(pid) + ".txt";
+
+    std::ofstream out(pathFull, std::ios::app);
+    if (!out.is_open()) {
+        std::cout << "Failed to open " << pathFull << std::endl;
+        return;
+    }
+
+    while (true) {
+        std::string rss = GetVMRSS(pid);
+        if (rss.empty()) {
+            std::cout << "Failed to get VmSize of process " << pid << std::endl;
+            out.close();
+            return;
+        }
+
+        out << rss << std::endl;
+        sleep(1);
+    }
+}
+
+/*
+ * @tc.name: stress_test_001
+ * @tc.desc: Check for memory leaks by repeatedly running end-to-end execution.
+ * @tc.type: FUNC
+ */
+HWTEST_F(StressTest, stress_test_001, testing::ext::TestSize.Level1)
+{
+    std::cout << "Start stress_test_001 test case." << std::endl;
+
+    pid_t pidOfStressTest = getpid();
+    std::thread thread(PrintVMRSS, pidOfStressTest);
+
+    size_t targetDevice{0};
+
+    int8_t activationValue{0};
+    CppQuantParam quantParam{{}, {}, {}};
+    CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
+    CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
+    CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE};
+    CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
+    std::vector<CppTensor> tensors{addend1, addend2, activation, output};
+
+    std::vector<float> firstAddendValue(12, 1.23);
+    std::vector<float> secondAddendValue(12, 2.34);
+    float outputBuffer[12];
+    std::vector<float> expectedOutput(12, 3.57);
+
+    for (int i = 0; i < STRESS_COUNT; i++) {
+        tensors = {addend1, addend2, activation, output};
+
+        m_model = OH_NNModel_Construct();
+        ASSERT_NE(nullptr, m_model);
+        ASSERT_EQ(OH_NN_SUCCESS, AddTensors(tensors));
+        ASSERT_EQ(OH_NN_SUCCESS, AddOperation(OH_NN_OPS_ADD, {2}, {0, 1}, {3}));
+        ASSERT_EQ(OH_NN_SUCCESS, SpecifyInputAndOutput({0, 1}, {3}));
+        ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_Finish(m_model));
+
+        m_compilation = OH_NNCompilation_Construct(m_model);
+        ASSERT_NE(nullptr, m_compilation);
+        OH_NNModel_Destroy(&m_model);
+        ASSERT_EQ(nullptr, m_model);
+
+        ASSERT_EQ(OH_NN_SUCCESS, GetDevices());
+        ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator.
+        targetDevice = m_devices[0]; // Use the first device in system test.
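+        // Compile, execute, and destroy every object created in this iteration; the VmSize values
+        // logged by PrintVMRSS should stay flat across iterations if nothing leaks.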
+ ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation)); + + m_executor = OH_NNExecutor_Construct(m_compilation); + ASSERT_NE(nullptr, m_executor); + OH_NNCompilation_Destroy(&m_compilation); + ASSERT_EQ(nullptr, m_compilation); + + // Set value of firstAddend + ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set value of secondAddend + ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set output buffer of output + ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH)); + + // Run inference and assert output value + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor)); + for (int j = 0; j < 12; j++) { + ASSERT_LE(std::abs(outputBuffer[j]-expectedOutput[j]), EPSILON); + } + + OH_NNExecutor_Destroy(&m_executor); + ASSERT_EQ(nullptr, m_executor); + + m_tensors.clear(); + m_quantParams.clear(); + m_nodes.clear(); + m_inputs.clear(); + m_outputs.clear(); + m_devices.clear(); + + if (i % 1000 == 0) { + std::cout << "Execute " << i << "times." << std::endl; + } + } + thread.join(); +} +} // namespace SystemTest +} // NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/test/unittest/BUILD.gn b/test/unittest/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..cca705240cd612b69029f7b3bd64ab7386f0482f --- /dev/null +++ b/test/unittest/BUILD.gn @@ -0,0 +1,23 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build/ohos.gni") + +group("unittest") { + testonly = true + deps = [ + "inner_kits:inner_kits_unittest", + "components:components_unittest", + "ops:ops_unittest", + ] +} \ No newline at end of file diff --git a/test/unittest/common/base_test.cpp b/test/unittest/common/base_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..26529b32fae881ae3d2e4e9429924a6a2c5c87e5 --- /dev/null +++ b/test/unittest/common/base_test.cpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "base_test.h" + +using namespace OHOS::NeuralNetworkRuntime::Ops; +using namespace std; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +void BaseTest::SetUp() {} + +void BaseTest::TearDown() {} + +std::shared_ptr BaseTest::TransToNNTensor( + OH_NN_DataType dataType, const std::vector& dim, const OH_NN_QuantParam* quantParam, + OH_NN_TensorType type) +{ + std::shared_ptr nnTensor = std::make_shared(); + OH_NN_Tensor tensor; + tensor.dataType = dataType; + tensor.dimensionCount = dim.size(); + tensor.dimensions = (dim.empty() ? nullptr : dim.data()); + tensor.quantParam = quantParam; + tensor.type = type; + nnTensor->BuildFromOHNNTensor(tensor); + return nnTensor; +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/common/base_test.h b/test/unittest/common/base_test.h new file mode 100644 index 0000000000000000000000000000000000000000..b77a3b67b1c400b60b3d8a1566792e571f1a860c --- /dev/null +++ b/test/unittest/common/base_test.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_BASE_TEST_H +#define NEURAL_NETWORK_RUNTIME_BASE_TEST_H + +#include +#include +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class BaseTest : public testing::Test { +public: + virtual void SetUp(); + virtual void TearDown(); + virtual std::shared_ptr TransToNNTensor( + OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, + OH_NN_TensorType type); +}; +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif \ No newline at end of file diff --git a/test/unittest/common/compilation_mock_idevice.cpp b/test/unittest/common/compilation_mock_idevice.cpp new file mode 100644 index 0000000000000000000000000000000000000000..52f647d0af64ec0f253ad5121d18158b5bb111ce --- /dev/null +++ b/test/unittest/common/compilation_mock_idevice.cpp @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common/utils.h" +#include "frameworks/native/device_manager.h" +#include "frameworks/native/hdi_device.h" +#include "frameworks/native/nn_tensor.h" +#include "test/unittest/common/mock_idevice.h" + +OH_NN_ReturnCode OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + +namespace OHOS { +namespace NeuralNetworkRuntime { +std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const +{ + sptr idevice + = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice()); + if (idevice == nullptr) { + LOGE("DeviceManager mock GetDevice failed, error happened when new sptr"); + return nullptr; + } + + std::shared_ptr device = CreateSharedPtr(idevice); + if (device == nullptr) { + LOGE("DeviceManager mock GetDevice failed, the device is nullptr"); + return nullptr; + } + + if (deviceId == 0) { + LOGE("DeviceManager mock GetDevice failed, the passed parameter deviceId is 0"); + return nullptr; + } else { + return device; + } +} + +OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) +{ + // isSupported is false when expecting to return success + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + // In order not to affect other use cases, set to the OH_NN_OPERATION_FORBIDDEN + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_FAILED; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr model, + std::vector& ops) +{ + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_FILE) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + ops.emplace_back(true); + return OH_NN_SUCCESS; + } + + if (model == nullptr) { + LOGE("HDIDevice mock GetSupportedOperation failed, Model is nullptr, cannot query supported operation."); + return OH_NN_NULL_PTR; + } + + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + ops.emplace_back(false); + return OH_NN_SUCCESS; + } + + ops.emplace_back(true); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsDynamicInputSupported(bool& isSupported) +{ + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_FAILED; + } + + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_PATH) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsPerformanceModeSupported(bool& isSupported) +{ + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_FAILED; + } + + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + isSupported = true; + return 
OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsPrioritySupported(bool& isSupported) +{ + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_PARAMETER) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_INVALID_PARAMETER; + } + + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsFloat16PrecisionSupported(bool& isSupported) +{ + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_MEMORY_ERROR) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_MEMORY_ERROR; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + if (model == nullptr) { + LOGE("HDIDevice mock PrepareModel failed, the model is nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + if (config.enableFloat16 == false) { + LOGE("HDIDevice mock PrepareModel failed, the enableFloat16 is false"); + return OH_NN_FAILED; + } + + sptr hdiPreparedModel = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIPreparedModel()); + if (hdiPreparedModel == nullptr) { + LOGE("HDIDevice mock PrepareModel failed, error happened when new sptr"); + return OH_NN_NULL_PTR; + } + + preparedModel = CreateSharedPtr(hdiPreparedModel); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIPreparedModel::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + LOGE("HDIPreparedModel mock ExportModelCache failed, the modelCache is not empty"); + return OH_NN_INVALID_PARAMETER; + } + + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return OH_NN_FAILED; + } + + int bufferSize = 13; + ModelBuffer modelBuffer; + std::string aBuffer = "mock_buffer_a"; + modelBuffer.buffer = (void*)aBuffer.c_str(); + modelBuffer.length = bufferSize; + modelCache.emplace_back(modelBuffer); + + ModelBuffer modelBuffer2; + std::string bBuffer = "mock_buffer_b"; + modelBuffer2.buffer = (void*)bBuffer.c_str(); + modelBuffer2.length = bufferSize; + modelCache.emplace_back(modelBuffer2); + + return OH_NN_SUCCESS; +} + +void* HDIDevice::AllocateBuffer(size_t length) +{ + if (length == 0) { + LOGE("HDIDevice mock AllocateBuffer failed, the length param is invalid"); + return nullptr; + } + + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_NULL_PTR) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return nullptr; + } + + void* buffer = (void*)malloc(length); + if (buffer == nullptr) { + LOGE("HDIDevice mock AllocateBuffer failed, the buffer is nullptr"); + return nullptr; + } + return buffer; +} + +OH_NN_ReturnCode HDIDevice::ReleaseBuffer(const void* buffer) +{ + if (buffer == nullptr) { + LOGE("HDIDevice mock ReleaseBuffer failed, the buffer is nullptr"); + return OH_NN_NULL_PTR; + } + + free(const_cast(buffer)); + buffer = nullptr; + 
return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return OH_NN_FAILED; + } + + if (modelCache.size() == 0 || config.enableFloat16 == false) { + LOGE("HDIDevice mock PrepareModel failed, the modelCache size equals 0 or enableFloat16 is false"); + return OH_NN_FAILED; + } + + sptr hdiPreparedModel = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIPreparedModel()); + if (hdiPreparedModel == nullptr) { + LOGE("HDIDevice mock PrepareModelFromModelCache failed, error happened when new sptr"); + return OH_NN_NULL_PTR; + } + + preparedModel = CreateSharedPtr(hdiPreparedModel); + + return OH_NN_SUCCESS; +} + +bool NNTensor::IsDynamicShape() const +{ + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return false; + } + + return true; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/common/executor_mock_device.cpp b/test/unittest/common/executor_mock_device.cpp new file mode 100644 index 0000000000000000000000000000000000000000..47934e0e3c2ccee30cc6e0643e558adef38e9bce --- /dev/null +++ b/test/unittest/common/executor_mock_device.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "frameworks/native/compilation.h"
+#include "frameworks/native/execution_plan.h"
+#include "frameworks/native/hdi_device.h"
+#include "test/unittest/common/mock_idevice.h"
+
+OH_NN_ReturnCode OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+std::shared_ptr<Device> ExecutionPlan::GetInputDevice() const
+{
+    sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice> idevice
+        = sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice>(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice());
+    std::shared_ptr<Device> device = std::make_shared<HDIDevice>(idevice);
+    return device;
+}
+
+std::shared_ptr<Device> ExecutionPlan::GetOutputDevice() const
+{
+    sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice> idevice
+        = sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice>(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice());
+    std::shared_ptr<Device> device = std::make_shared<HDIDevice>(idevice);
+    return device;
+}
+
+void* HDIDevice::AllocateBuffer(size_t length)
+{
+    if (length == 0) {
+        LOGE("The length param is invalid, length=0");
+        return nullptr;
+    }
+
+    void* buffer = (void*)malloc(length);
+    if (buffer == nullptr) {
+        LOGE("Failed to allocate buffer");
+        return nullptr;
+    }
+
+    if (OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_PARAMETER) {
+        OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN;
+        return nullptr;
+    }
+    return buffer;
+}
+
+OH_NN_ReturnCode HDIDevice::ReleaseBuffer(const void* buffer)
+{
+    if (buffer == nullptr) {
+        LOGE("Failed to release buffer, the buffer is nullptr");
+        return OH_NN_FAILED;
+    }
+    free(const_cast<void*>(buffer));
+    buffer = nullptr;
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode HDIPreparedModel::Run(const std::vector<IOTensor>& inputs, const std::vector<IOTensor>& outputs,
+    std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough)
+{
+    if (inputs.empty() || outputs.empty()) {
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) {
+        OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN;
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    isOutputBufferEnough.emplace_back(true);
+    outputsDims.emplace_back(outputs[0].dimensions);
+
+    return OH_NN_SUCCESS;
+}
+
+std::shared_ptr<ExecutionPlan> Compilation::GetExecutionPlan() const
+{
+    sptr<OHOS::HDI::Nnrt::V1_0::IPreparedModel> hdiPreparedModel =
+        OHOS::sptr<OHOS::HDI::Nnrt::V1_0::IPreparedModel>(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIPreparedModel());
+
+    std::shared_ptr<PreparedModel> preparedModel = std::make_shared<HDIPreparedModel>(hdiPreparedModel);
+    sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice> idevice
+        = OHOS::sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice>(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice());
+    std::shared_ptr<Device> device = std::make_shared<HDIDevice>(idevice);
+    ExecutionPlan executor(preparedModel, device);
+    std::shared_ptr<ExecutionPlan> pExecutor = std::make_shared<ExecutionPlan>(executor);
+    return pExecutor;
+}
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/test/unittest/common/file_utils.cpp b/test/unittest/common/file_utils.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c6cd79f8559a820ae553cfa9b8d241b9026a144e
--- /dev/null
+++ b/test/unittest/common/file_utils.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "file_utils.h" + +#include +#include + +#include "common/log.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +FileUtils::FileUtils(const std::string &filename) :m_filename(filename) +{ +} + +FileUtils::~FileUtils() +{ + if (!m_filename.empty()) { + int ret = unlink(m_filename.c_str()); + if (ret != 0) { + LOGE("Failed to delete file: %s.", m_filename.c_str()); + } + } +} + +bool FileUtils::WriteFile(const std::string &data) +{ + std::ofstream outFile(m_filename); + if (!outFile.is_open()) { + LOGE("Failed to open file: %s.", m_filename.c_str()); + return false; + } + outFile.write(data.c_str(), data.length()); + outFile.close(); + return true; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/common/file_utils.h b/test/unittest/common/file_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..b1a8526fa2b60281e14648cd7f59d51dbf9cc1c7 --- /dev/null +++ b/test/unittest/common/file_utils.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_UNITTEST_FILE_UTILS_H +#define NEURAL_NETWORK_RUNTIME_UNITTEST_FILE_UTILS_H + +#include + +namespace OHOS { +namespace NeuralNetworkRuntime { +class FileUtils { +public: + explicit FileUtils(const std::string &filename); + ~FileUtils(); + bool WriteFile(const std::string &data); + +private: + std::string m_filename; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif diff --git a/test/unittest/common/inner_model_mock_device.cpp b/test/unittest/common/inner_model_mock_device.cpp new file mode 100644 index 0000000000000000000000000000000000000000..386ee5ba60e5ad17f44d8168ff3badb88c051d22 --- /dev/null +++ b/test/unittest/common/inner_model_mock_device.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "common/utils.h" +#include "frameworks/native/inner_model.h" +#include "frameworks/native/hdi_device.h" +#include "frameworks/native/device_manager.h" +#include "frameworks/native/ops/div_builder.h" +#include "mock_idevice.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +// Mock the palce where the devicemanager GetDevice is called in inner_model build function. 
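+// Returning nullptr when deviceId is 0 lets the unit tests drive the failure branch of the build flow.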
+std::shared_ptr<Device> DeviceManager::GetDevice(size_t deviceId) const
+{
+    sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice> idevice =
+        sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice>(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice());
+
+    if (idevice == nullptr) {
+        LOGE("DeviceManager mock GetDevice failed, error happened when new sptr");
+        return nullptr;
+    } else {
+        std::shared_ptr<Device> device = CreateSharedPtr<HDIDevice>(idevice);
+        if (device == nullptr) {
+            LOGE("DeviceManager mock GetDevice failed, device is nullptr");
+            return nullptr;
+        }
+
+        if (deviceId == 0) {
+            return nullptr;
+        } else {
+            return device;
+        }
+    }
+}
+
+// Mock the place where the operator GetPrimitive is called in the inner_model build function.
+Ops::LiteGraphPrimitvePtr Ops::DivBuilder::GetPrimitive()
+{
+    Ops::LiteGraphPrimitvePtr primitive = {nullptr, DestroyLiteGraphTensor};
+    return primitive;
+}
+
+// Mock the place where the device GetSupportedOperation is called in the inner_model build function.
+OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr<const mindspore::lite::LiteGraph> model,
+    std::vector<bool>& supportedOperations)
+{
+    supportedOperations = {true, true, true};
+
+    if (model->name_ == "Loaded_NNR_Model") {
+        return OH_NN_UNAVALIDABLE_DEVICE;
+    } else {
+        return OH_NN_SUCCESS;
+    }
+}
+} // NeuralNetworkRuntime
+} // OHOS
diff --git a/test/unittest/common/mock_idevice.cpp b/test/unittest/common/mock_idevice.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a6dab85ef70fd3c0048ba8e7196986384c744d0d
--- /dev/null
+++ b/test/unittest/common/mock_idevice.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "mock_idevice.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +sptr INnrtDevice::Get(bool isStub) +{ + return INnrtDevice::Get("device_service", isStub); +} + +sptr INnrtDevice::Get(const std::string& serviceName, bool isStub) +{ + if (isStub) { + return nullptr; + } + + sptr mockIDevice = sptr(new (std::nothrow) MockIDevice()); + if (mockIDevice == nullptr) { + return nullptr; + } + std::string deviceName = "MockDevice"; + EXPECT_CALL(*((V1_0::MockIDevice*)mockIDevice.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_SUCCESS))); + + std::string vendorName = "MockVendor"; + EXPECT_CALL(*((V1_0::MockIDevice*)mockIDevice.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_SUCCESS))); + + V1_0::DeviceStatus deviceStatus = V1_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*((V1_0::MockIDevice*)mockIDevice.GetRefPtr()), GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceStatus), ::testing::Return(HDF_SUCCESS))); + + return mockIDevice; +} +} // V1_0 +} // Nnrt +} // HDI +} // OHOS \ No newline at end of file diff --git a/test/unittest/common/mock_idevice.h b/test/unittest/common/mock_idevice.h new file mode 100644 index 0000000000000000000000000000000000000000..64e8231c331b3bdfac76a9e1df8a391036518056 --- /dev/null +++ b/test/unittest/common/mock_idevice.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_MOCK_IDEVICE_H +#define NEURAL_NETWORK_RUNTIME_MOCK_IDEVICE_H + +#include + +#include "frameworks/native/hdi_prepared_model.h" +#include "frameworks/native/memory_manager.h" +#include "frameworks/native/transform.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +class MockIDevice : public INnrtDevice { +public: + MOCK_METHOD1(GetDeviceName, int32_t(std::string&)); + MOCK_METHOD1(GetVendorName, int32_t(std::string&)); + MOCK_METHOD1(GetDeviceType, int32_t(DeviceType&)); + MOCK_METHOD1(GetDeviceStatus, int32_t(DeviceStatus&)); + MOCK_METHOD2(GetSupportedOperation, int32_t(const Model&, std::vector&)); + MOCK_METHOD1(IsFloat16PrecisionSupported, int32_t(bool&)); + MOCK_METHOD1(IsPerformanceModeSupported, int32_t(bool&)); + MOCK_METHOD1(IsPrioritySupported, int32_t(bool&)); + MOCK_METHOD1(IsDynamicInputSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModel, int32_t(const Model&, const ModelConfig&, OHOS::sptr&)); + MOCK_METHOD1(IsModelCacheSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModelFromModelCache, int32_t(const std::vector&, const ModelConfig&, + OHOS::sptr&)); + MOCK_METHOD2(AllocateBuffer, int32_t(uint32_t, SharedBuffer&)); + MOCK_METHOD1(ReleaseBuffer, int32_t(const SharedBuffer&)); + MOCK_METHOD2(GetVersion, int32_t(uint32_t&, uint32_t&)); +}; + +class MockIPreparedModel : public IPreparedModel { +public: + MOCK_METHOD1(ExportModelCache, int32_t(std::vector&)); + MOCK_METHOD4(Run, int32_t(const std::vector&, const std::vector&, + std::vector>&, std::vector&)); + MOCK_METHOD2(GetVersion, int32_t(uint32_t&, uint32_t&)); + + static OH_NN_ReturnCode m_ExpectRetCode; +}; +} // V1_0 +} // Nnrt +} // HDI +} // OHOS +#endif // NEURAL_NETWORK_RUNTIME_MOCK_IDEVICE_H diff --git a/test/unittest/components/BUILD.gn b/test/unittest/components/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..877f0843b6d6a7a72c558b58e8c1c25b13659df7 --- /dev/null +++ b/test/unittest/components/BUILD.gn @@ -0,0 +1,336 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import("//build/test.gni") + +module_output_path = "neural_network_runtime/" + +config("module_private_config") { + visibility = [ ":*" ] + + include_dirs = [ + "//third_party/googletest/googlemock/include", + "//foundation/ai/neural_network_runtime", + "//foundation/communication/ipc/interfaces/innerkits/ipc_core/include", + "//third_party/mindspore/mindspore/lite/mindir/include" + ] +} + +ohos_unittest("CompilationTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/compilation/compilation_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/compilation_mock_idevice.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("ExecutorTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/executor/executor_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/executor_mock_device.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("DeviceManagerTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/device_manager/device_manager_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/mock_idevice.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("DeviceRegistrarTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/device_registrar/device_registrar_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/mock_idevice.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("HDIDeviceTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/hdi_device/hdi_device_test.cpp" ] + sources += [ 
"//foundation/ai/neural_network_runtime/test/unittest/common/mock_idevice.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/file_utils.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("HDIPreparedModelTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/hdi_prepared_model/hdi_prepared_model_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/mock_idevice.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/file_utils.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("MemoryManagerTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/memory_manager/memory_manager_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/file_utils.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("TransformTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/transform/transform_test.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("InnerModelTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/inner_model/inner_model_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/inner_model_mock_device.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("NnTensorTest") { + module_out_path = module_output_path + + sources = [ 
"//foundation/ai/neural_network_runtime/test/unittest/components/inner_model/nn_tensor_test.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("NnValidationTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/inner_model/nn_validation_test.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("OpsRegistryTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/inner_model/ops_regitstry_test.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("NeuralNetworkRuntimeTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/executor_mock_device.cpp" ] + + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +group("components_unittest") { + testonly = true + deps = [ + ":CompilationTest", + ":ExecutorTest", + ":DeviceManagerTest", + ":DeviceRegistrarTest", + ":HDIDeviceTest", + ":HDIPreparedModelTest", + ":MemoryManagerTest", + ":TransformTest", + ":InnerModelTest", + ":NnTensorTest", + ":NnValidationTest", + ":OpsRegistryTest", + ":NeuralNetworkRuntimeTest", + ] +} diff --git a/test/unittest/components/compilation/compilation_test.cpp b/test/unittest/components/compilation/compilation_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8529ccb2ed4fd0eadf897a26c0aa7c77e73dad14 --- /dev/null +++ b/test/unittest/components/compilation/compilation_test.cpp @@ -0,0 +1,1143 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "compilation_test.h" + +#include + +#include "mindir.h" + +#include "test/unittest/common/mock_idevice.h" + +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::HDI::Nnrt::V1_0; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +static const int DATA_VALUE = 1; +static const int DATA_NUM = 36; +static const int DIM_NUM = 3; +OH_NN_ReturnCode CompilationTest::BuildModelGraph(NeuralNetworkRuntime::InnerModel& innerModel) +{ + // liteGraph is released internally by innerModel + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph; + EXPECT_NE(nullptr, liteGraph); + + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_ = {0}; + liteGraph->output_indices_ = {1}; + liteGraph->all_tensors_ = {nullptr}; + const std::vector quant_params {}; + const std::vector data(DATA_NUM, DATA_VALUE); + const std::vector dim = {DIM_NUM, DIM_NUM}; + + for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) { + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) { + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + OH_NN_ReturnCode ret = innerModel.BuildFromLiteGraph(liteGraph); + return ret; +} + +void CompilationTest::SetConfig(Compilation& compilationTest) +{ + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetPerformance(OH_NN_PERFORMANCE_EXTREME)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetPriority(OH_NN_PRIORITY_HIGH)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); +} + +void CompilationTest::WriteFile(uint64_t version, uint64_t fileNumber, std::size_t cacheDeviceId) +{ + uint64_t cacheSize = 4; + uint64_t writeSize = 7; + uint64_t cacheInfo[7] = {}; + auto cacheInfoPtr = cacheInfo; + *cacheInfoPtr++ = fileNumber; + *cacheInfoPtr++ = version; + *cacheInfoPtr++ = cacheDeviceId; + for (uint64_t i = 0; i < cacheSize; ++i) { + *cacheInfoPtr++ = i; + } + std::ofstream inFile("cache_info.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + inFile.write(reinterpret_cast(cacheInfo), writeSize * sizeof(uint64_t)); + inFile.close(); +} + +void CompilationTest::BuildCompilation(InnerModel& innerModel) +{ + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); +} + +/* + * @tc.name: compilation_set_device_001 + * @tc.desc: Verify the set deviceId after compilation finish of the SetDevice function. 
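+ *           Once Build() has completed, a further SetDevice call is expected to be rejected with OH_NN_OPERATION_FORBIDDEN.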
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_device_002 + * @tc.desc: Verify the deviceId does not exist of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + size_t deviceId = 0; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_device_003 + * @tc.desc: Verify the error happened when getting supported operation of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_NULL_PTR, ret); +} + +/* + * @tc.name: compilation_set_device_004 + * @tc.desc: Verify the current device not support the model of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_device_005 + * @tc.desc: Verify the error happened when checking whether device supports dynamic input of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_device_006 + * @tc.desc: Verify the device does not support dynamic shape inputs of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PATH; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_device_007 + * @tc.desc: Verify the set normal deviceId of the SetDevice function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_007, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_cachedir_001 + * @tc.desc: Verify the set cache after compilation finish of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_cachedir_002 + * @tc.desc: Verify the not set device of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_cachedir_003 + * @tc.desc: Verify the Fail to query whether the device is available to save cache model of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_cachedir_004 + * @tc.desc: Verify the device is unavailable to save cache model of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_cachedir_005 + * @tc.desc: Verify the cache model path is invalid of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../compilation_test.cpp", 1); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_cachedir_006 + * @tc.desc: Verify the cache model path is not a directory of the SetCacheDir function. 
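+ *           The test passes "./CompilationTest", which is not an existing directory, so SetCacheDir is expected to return OH_NN_INVALID_PARAMETER.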
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("./CompilationTest", 1); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_cachedir_007 + * @tc.desc: Verify the success of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_007, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_performance_001 + * @tc.desc: Verify the set performance after compilation finish of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_performance_002 + * @tc.desc: Verify the set performance before set device of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_performance_003 + * @tc.desc: Verify the call device failed of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_performance_004 + * @tc.desc: Verify the device is not support performance setting of the SetPerformance function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_performance_005 + * @tc.desc: Verify the passed invalid performance of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_PerformanceMode performance = static_cast(5); + OH_NN_ReturnCode ret = compilationTest.SetPerformance(performance); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_performance_006 + * @tc.desc: Verify the success of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_priority_001 + * @tc.desc: Verify the set priority after compilation finish of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_priority_002 + * @tc.desc: Verify the set priority before set device of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_priority_003 + * @tc.desc: Verify the call device failed of the SetPriority function. 
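+ *           The mocked device is primed to return OH_NN_INVALID_PARAMETER, which SetPriority is expected to propagate to the caller.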
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_priority_004 + * @tc.desc: Verify the device is not support priority setting of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_priority_005 + * @tc.desc: Verify the passed invalid priority of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_Priority priority = static_cast(5);; + OH_NN_ReturnCode ret = compilationTest.SetPriority(priority); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_priority_006 + * @tc.desc: Verify the success of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_enable_fp16_001 + * @tc.desc: Verify the enable float16 after compilation finish of the SetEnableFp16 function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_enable_fp16_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_enable_fp16_002 + * @tc.desc: Verify the set enable fp16 before set device of the SetEnableFp16 function. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(CompilationTest, compilation_set_enable_fp16_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    Compilation compilationTest(&innerModel);
+
+    OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/*
+ * @tc.name: compilation_set_enable_fp16_003
+ * @tc.desc: Verify the call device failed of the SetEnableFp16 function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(CompilationTest, compilation_set_enable_fp16_003, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation compilationTest(&innerModel);
+
+    std::size_t deviceId = 1;
+    EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId));
+
+    MockIPreparedModel::m_ExpectRetCode = OH_NN_MEMORY_ERROR;
+    OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true);
+    EXPECT_EQ(OH_NN_MEMORY_ERROR, ret);
+}
+
+/*
+ * @tc.name: compilation_set_enable_fp16_004
+ * @tc.desc: Verify the device does not support float16 precision setting of the SetEnableFp16 function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(CompilationTest, compilation_set_enable_fp16_004, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation compilationTest(&innerModel);
+
+    std::size_t deviceId = 1;
+    EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId));
+
+    MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS;
+    OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/*
+ * @tc.name: compilation_set_enable_fp16_005
+ * @tc.desc: Verify the success of the SetEnableFp16 function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(CompilationTest, compilation_set_enable_fp16_005, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation compilationTest(&innerModel);
+
+    std::size_t deviceId = 1;
+    EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId));
+
+    OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/*
+ * @tc.name: compilation_get_input_tensors_001
+ * @tc.desc: Verify the normal input tensors of the GetInputTensors function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(CompilationTest, compilation_get_input_tensors_001, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    Compilation compilationTest(&innerModel);
+    EXPECT_EQ(innerModel.GetInputTensors(), compilationTest.GetInputTensors());
+}
+
+/*
+ * @tc.name: compilation_get_output_tensors_001
+ * @tc.desc: Verify the normal output tensors of the GetOutputTensors function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(CompilationTest, compilation_get_output_tensors_001, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    Compilation compilationTest(&innerModel);
+    EXPECT_EQ(innerModel.GetOutputTensors(), compilationTest.GetOutputTensors());
+}
+
+/*
+ * @tc.name: compilation_get_execution_plan_001
+ * @tc.desc: Verify the passed nullptr of the GetExecutionPlan function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(CompilationTest, compilation_get_execution_plan_001, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    Compilation compilationTest(&innerModel);
+    EXPECT_EQ(nullptr, compilationTest.GetExecutionPlan());
+}
+
+/*
+ * @tc.name: compilation_is_dynamic_shape_001
+ * @tc.desc: Verify the input tensor is empty of the IsDynamicShape function.
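+ *           The InnerModel has not been built, so there are no input tensors and IsDynamicShape is expected to return false.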
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_is_dynamic_shape_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + EXPECT_EQ(false, compilationTest.IsDynamicShape()); +} + +/* + * @tc.name: compilation_is_dynamic_shape_002 + * @tc.desc: Verify the return true of the IsDynamicShape function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_is_dynamic_shape_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + EXPECT_EQ(true, compilationTest.IsDynamicShape()); +} + +/* + * @tc.name: compilation_is_dynamic_shape_003 + * @tc.desc: Verify the return false of the IsDynamicShape function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_is_dynamic_shape_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + EXPECT_EQ(false, compilationTest.IsDynamicShape()); +} + +/* + * @tc.name: compilation_is_build_001 + * @tc.desc: Verify return false of the IsBuild function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_is_build_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + EXPECT_EQ(false, compilationTest.IsBuild()); +} + +/* + * @tc.name: compilation_build_001 + * @tc.desc: Verify the build after compilation finish of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_build_002 + * @tc.desc: Verify the not set device of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_build_003 + * @tc.desc: Verify the preparing model failed of the Build function without set cache path. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_FILE; + SetConfig(compilationTest); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_004 + * @tc.desc: Verify the success of the Build function without set cache path. 
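+ *           No cache directory is configured, so Build() prepares the model directly and is expected to return OH_NN_SUCCESS.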
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_build_005 + * @tc.desc: Verify the preparing model failed of the Build function without cache file. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_FILE; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_006 + * @tc.desc: Verify the export model cache failed of the Build function without cache file. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_build_007 + * @tc.desc: Verify the model cache file is invalid to generating cache mode of the Build function without cache file. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_007, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("/sys", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_008 + * @tc.desc: Verify the success to generating cache mode of the Build function without cache file. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_008, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_build_009 + * @tc.desc: Verify the Fail to get the content of info cache file of the Build. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(CompilationTest, compilation_build_009, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    BuildCompilation(innerModel);
+
+    Compilation compilationTest(&innerModel);
+    SetConfig(compilationTest);
+    EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1));
+
+    std::ofstream createFile("cache_info.nncache");
+    createFile.close();
+    OH_NN_ReturnCode ret = compilationTest.Build();
+    EXPECT_EQ(0, remove("0.nncache"));
+    EXPECT_EQ(0, remove("1.nncache"));
+    EXPECT_EQ(0, remove("cache_info.nncache"));
+    EXPECT_EQ(OH_NN_INVALID_FILE, ret);
+}
+
+/*
+ * @tc.name: compilation_build_010
+ * @tc.desc: Verify the deviceId in the cache files is different from the current deviceId of the Build function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(CompilationTest, compilation_build_010, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    BuildCompilation(innerModel);
+    WriteFile(1, 4, 2);
+
+    Compilation compilationTest(&innerModel);
+    SetConfig(compilationTest);
+    EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1));
+
+    OH_NN_ReturnCode ret = compilationTest.Build();
+    EXPECT_EQ(0, remove("0.nncache"));
+    EXPECT_EQ(0, remove("1.nncache"));
+    EXPECT_EQ(0, remove("cache_info.nncache"));
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: compilation_build_011
+ * @tc.desc: Verify the info cache file has been changed of the Build function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(CompilationTest, compilation_build_011, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    BuildCompilation(innerModel);
+    WriteFile(1, 100, 1);
+
+    Compilation compilationTest(&innerModel);
+    SetConfig(compilationTest);
+    EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1));
+
+    OH_NN_ReturnCode ret = compilationTest.Build();
+    EXPECT_EQ(0, remove("0.nncache"));
+    EXPECT_EQ(0, remove("1.nncache"));
+    EXPECT_EQ(0, remove("cache_info.nncache"));
+    EXPECT_EQ(OH_NN_INVALID_FILE, ret);
+}
+
+/*
+ * @tc.name: compilation_build_012
+ * @tc.desc: Verify the preparing model failed of the Build function when the model version is greater than the cached version.
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_012, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_FILE; + SetConfig(compilationTest); + WriteFile(0, 4, 1); + + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + std::ofstream inFile("0.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + inFile.close(); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_013 + * @tc.desc: Verify that the build function return success message with model version is greater than cached version + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_013, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + WriteFile(0, 1, 1); + + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + std::ofstream inFile("0.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + inFile.close(); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_build_014 + * @tc.desc: Verify the model version is less than version cache of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_014, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + WriteFile(3, 4, 1); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_build_015 + * @tc.desc: Verify the checking cache model failed of the Build function with release buffer. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_015, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + EXPECT_EQ(0, remove("1.nncache")); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_016 + * @tc.desc: Verify the get cache file length of the Build function. 
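+ *           The cached 0.nncache file is truncated to zero length before rebuilding, so Build() is expected to fail with OH_NN_INVALID_FILE.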
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_016, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + std::ofstream inFile("0.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + inFile.close(); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_017 + * @tc.desc: Verify the fail to create file buffer of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_017, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_NULL_PTR; + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_NULL_PTR, ret); +} + +/* + * @tc.name: compilation_build_018 + * @tc.desc: Verify the cache model file has been changed of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_018, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + uint64_t version = 1; + uint64_t fileNumber = 1; + std::size_t cacheDeviceId = 1; + uint64_t cacheInfo[7] = {}; + auto cacheInfoPtr = cacheInfo; + *cacheInfoPtr++ = fileNumber; + *cacheInfoPtr++ = version; + *cacheInfoPtr++ = cacheDeviceId; + for (uint64_t i = 0; i < 4; ++i) { + *cacheInfoPtr++ = i; + } + + std::ofstream onFile("0.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + onFile.write(reinterpret_cast(cacheInfo), 7 * sizeof(uint64_t)); + onFile.close(); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_019 + * @tc.desc: Verify the preparing model from cache failed of the Build function with load cache build. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_019, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_build_020 + * @tc.desc: Verify the success of the Build function with load cache build. 
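+ *           BuildCompilation() generates a complete model cache first, so this Build() loads the model from the cache files and is expected to return OH_NN_SUCCESS.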
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_020, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/compilation/compilation_test.h b/test/unittest/components/compilation/compilation_test.h new file mode 100644 index 0000000000000000000000000000000000000000..8217f4f3acec605c1dd10cb2b198b180c38ce8bd --- /dev/null +++ b/test/unittest/components/compilation/compilation_test.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_COMPILATION_UNITTEST_H +#define NEURAL_NETWORK_RUNTIME_COMPILATION_UNITTEST_H + +#include + +#include "frameworks/native/compilation.h" +#include "frameworks/native/inner_model.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class CompilationTest : public testing::Test { +public: + OH_NN_ReturnCode BuildModelGraph(NeuralNetworkRuntime::InnerModel& innerModel); + void SetConfig(Compilation& compilationTest); + void WriteFile(uint64_t version, uint64_t fileNumber, std::size_t cacheDeviceId); + void BuildCompilation(InnerModel& innerModel); +}; +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_COMPILATION_UNITTEST_H \ No newline at end of file diff --git a/test/unittest/components/device_manager/device_manager_test.cpp b/test/unittest/components/device_manager/device_manager_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3e5068962be3e663510bb60607c2656db7769921 --- /dev/null +++ b/test/unittest/components/device_manager/device_manager_test.cpp @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include "common/log.h" +#include "frameworks/native/device_manager.h" +#include "frameworks/native/hdi_device.h" +#include "test/unittest/common/mock_idevice.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class DeviceManagerTest : public testing::Test { +protected: + void MockInit(OHOS::sptr device, const std::vector& typeVect, + const std::string& deviceName, const std::string& vendorName); +}; + +void DeviceManagerTest::MockInit(OHOS::sptr device, const std::vector& typeVect, + const std::string& deviceName, const std::string& vendorName) +{ + const size_t typeSize = 4; + int index = 0; + EXPECT_EQ(typeSize, typeVect.size()); + EXPECT_CALL(*device, GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), + ::testing::Return(typeVect[index++]))); + + EXPECT_CALL(*device, GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), + ::testing::Return(typeVect[index++]))); + + V1_0::DeviceStatus deviceStatus = V1_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*device, GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceStatus), + ::testing::Return(typeVect[index++]))); + + uint32_t majorVer = 1; + uint32_t minorVer = 0; + EXPECT_CALL(*device, GetVersion(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(majorVer), ::testing::SetArgReferee<1>(minorVer), + ::testing::Return(typeVect[index++]))); +} + +/** + * @tc.name: devicemanager_getalldeviceid_001 + * @tc.desc: Verify the GetAllDeviceId function return deviceid list is not null. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_getalldeviceid_001, TestSize.Level0) +{ + auto &deviceManager = DeviceManager::GetInstance(); + std::vector idVect = deviceManager.GetAllDeviceId(); + EXPECT_NE((size_t)0, idVect.size()); + + const size_t expectDeviceId {std::hash{}("MockDevice_MockVendor")}; + EXPECT_EQ(expectDeviceId, idVect[0]); + + const std::string expectDeviceName = "MockDevice"; + std::string deviceName = ""; + std::shared_ptr retDevice = deviceManager.GetDevice(idVect[0]); + retDevice->GetDeviceName(deviceName); + EXPECT_EQ(deviceName, expectDeviceName); +} + +/** + * @tc.name: devicemanager_getdevice_001 + * @tc.desc: Verify the GetDevice function return nullptr in case of deviceId invalid. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_getdevice_001, TestSize.Level0) +{ + auto &deviceManager = DeviceManager::GetInstance(); + const size_t deviceId = 1; + std::shared_ptr result = deviceManager.GetDevice(deviceId); + EXPECT_EQ(nullptr, result); +} + +/** + * @tc.name: devicemanager_getdevice_002 + * @tc.desc: Verify the GetDevice function validate device name return specified device name. 
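+ *           The device returned for the registered id is expected to report the device name "MockDevice".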
+ * @tc.type: FUNC
+ */
+HWTEST_F(DeviceManagerTest, devicemanager_getdevice_002, TestSize.Level0)
+{
+    auto &deviceManager = DeviceManager::GetInstance();
+    std::vector<size_t> idVect = deviceManager.GetAllDeviceId();
+    EXPECT_EQ((size_t)1, idVect.size());
+    size_t deviceId = idVect[0];
+    std::shared_ptr<Device> result = deviceManager.GetDevice(deviceId);
+    EXPECT_NE(nullptr, result);
+
+    const std::string expectDeviceNameA = "MockDevice";
+    std::string deviceName = "";
+    result->GetDeviceName(deviceName);
+    EXPECT_EQ(deviceName, expectDeviceNameA);
+}
+
+/**
+ * @tc.name: devicemanager_registerdevice_001
+ * @tc.desc: Verify the RegisterDevice function when registering the same device repeatedly.
+ * @tc.type: FUNC
+ */
+HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_001, TestSize.Level0)
+{
+    std::vector<int32_t> typeVect = {HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS};
+    OHOS::sptr<V1_0::MockIDevice> device = OHOS::sptr<V1_0::MockIDevice>(new (std::nothrow) V1_0::MockIDevice());
+    EXPECT_NE(device.GetRefPtr(), nullptr);
+
+    std::string deviceName = "MockDevice";
+    std::string vendorName = "MockVendor";
+    MockInit(device, typeVect, deviceName, vendorName);
+
+    std::function<std::shared_ptr<Device>()> creator =
+        [&device]()->std::shared_ptr<Device> {return std::make_shared<HDIDevice>(device);};
+    auto& deviceManager = DeviceManager::GetInstance();
+    OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator);
+    EXPECT_EQ(OH_NN_FAILED, result);
+}
+
+/**
+ * @tc.name: devicemanager_registerdevice_002
+ * @tc.desc: Verify the RegisterDevice function return invalid parameter.
+ * @tc.type: FUNC
+ */
+HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_002, TestSize.Level0)
+{
+    std::function<std::shared_ptr<Device>()> creator =
+        []()->std::shared_ptr<Device> {return nullptr;};
+    auto& deviceManager = DeviceManager::GetInstance();
+    OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/**
+ * @tc.name: devicemanager_registerdevice_003
+ * @tc.desc: Verify the RegisterDevice function return unavailable device in case of device name invalid param.
+ * @tc.type: FUNC
+ */
+HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_003, TestSize.Level0)
+{
+    std::vector<int32_t> typeVect = {HDF_FAILURE, HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS};
+    OHOS::sptr<V1_0::MockIDevice> device = OHOS::sptr<V1_0::MockIDevice>(new (std::nothrow) V1_0::MockIDevice());
+    EXPECT_NE(device.GetRefPtr(), nullptr);
+
+    std::string deviceName = "MockDevice";
+    std::string vendorName = "MockVendor";
+    MockInit(device, typeVect, deviceName, vendorName);
+
+    std::function<std::shared_ptr<Device>()> creator =
+        [&device]()->std::shared_ptr<Device> {return std::make_shared<HDIDevice>(device);};
+    auto& deviceManager = DeviceManager::GetInstance();
+    OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator);
+    EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result);
+}
+
+/**
+ * @tc.name: devicemanager_registerdevice_004
+ * @tc.desc: Verify the RegisterDevice function return unavailable device in case of vendor name failure.
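+ *           GetVendorName is mocked to return HDF_FAILURE, so RegisterDevice is expected to fail with OH_NN_UNAVALIDABLE_DEVICE.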
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_004, TestSize.Level0) +{ + std::vector typeVect = {HDF_SUCCESS, HDF_FAILURE, HDF_SUCCESS, HDF_SUCCESS}; + OHOS::sptr device = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); + EXPECT_NE(device.GetRefPtr(), nullptr); + + std::string deviceName = "MockDevice"; + std::string vendorName = "MockVendor"; + MockInit(device, typeVect, deviceName, vendorName); + + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; + auto& deviceManager = DeviceManager::GetInstance(); + OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/** + * @tc.name: devicemanager_registerdevice_005 + * @tc.desc: Verify the RegisterDevice function return success. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_005, TestSize.Level0) +{ + std::vector typeVect = {HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS}; + OHOS::sptr device = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); + EXPECT_NE(device.GetRefPtr(), nullptr); + + std::string deviceName = "MockDeviceA"; + std::string vendorName = "MockVendorA"; + MockInit(device, typeVect, deviceName, vendorName); + + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; + auto& deviceManager = DeviceManager::GetInstance(); + OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); + EXPECT_EQ(OH_NN_SUCCESS, result); + + std::vector idVect = deviceManager.GetAllDeviceId(); + EXPECT_NE((size_t)0, idVect.size()); + + const size_t expectDeviceId {std::hash{}("MockDeviceA_MockVendorA")}; + EXPECT_EQ(expectDeviceId, idVect[0]); + + const std::string expectDeviceName = "MockDeviceA_MockVendorA"; + const std::string retDeviceName = deviceManager.GetDeviceName(idVect[0]); + EXPECT_EQ(retDeviceName, expectDeviceName); +} + +/** + * @tc.name: devicemanager_getdevicename_001 + * @tc.desc: Verify the GetDevice function return empty string in case of deviceid invalid. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_getdevicename_001, TestSize.Level0) +{ + auto &deviceManager = DeviceManager::GetInstance(); + const size_t deviceId = 1; + std::string result = deviceManager.GetDeviceName(deviceId); + EXPECT_EQ("", result); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/device_registrar/device_registrar_test.cpp b/test/unittest/components/device_registrar/device_registrar_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4f1ec7cee8563a456d0c5001350a9827bfa1b4ea --- /dev/null +++ b/test/unittest/components/device_registrar/device_registrar_test.cpp @@ -0,0 +1,264 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include +#include +#include + +#include "common/log.h" +#include "interfaces/oem/cpp_api/device_registrar.h" +#include "frameworks/native/hdi_device.h" +#include "frameworks/native/device_manager.h" +#include "test/unittest/common/mock_idevice.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class IRegisterDevice : public HDI::HdiBase { +public: + DECLARE_HDI_DESCRIPTOR(u"ohos.hdi.nnrt.v1_0.IRegisterDevice"); + + virtual ~IRegisterDevice() = default; + + static sptr Get(bool isStub = false); + static sptr Get(const std::string& serviceName, bool isStub = false); + + virtual int32_t GetDeviceName(std::string& name) = 0; + + virtual int32_t GetVendorName(std::string& name) = 0; + + virtual int32_t GetDeviceType(V1_0::DeviceType& deviceType) = 0; + + virtual int32_t GetDeviceStatus(V1_0::DeviceStatus& status) = 0; + + virtual int32_t GetSupportedOperation(const V1_0::Model& model, std::vector& ops) = 0; + + virtual int32_t IsFloat16PrecisionSupported(bool& isSupported) = 0; + + virtual int32_t IsPerformanceModeSupported(bool& isSupported) = 0; + + virtual int32_t IsPrioritySupported(bool& isSupported) = 0; + + virtual int32_t IsDynamicInputSupported(bool& isSupported) = 0; + + virtual int32_t PrepareModel(const V1_0::Model& model, const V1_0::ModelConfig& config, + sptr& preparedModel) = 0; + + virtual int32_t IsModelCacheSupported(bool& isSupported) = 0; + + virtual int32_t PrepareModelFromModelCache(const std::vector& modelCache, + const V1_0::ModelConfig& config, sptr& preparedModel) = 0; + + virtual int32_t AllocateBuffer(uint32_t length, V1_0::SharedBuffer& buffer) = 0; + + virtual int32_t ReleaseBuffer(const V1_0::SharedBuffer& buffer) = 0; + + virtual int32_t GetVersion(uint32_t& majorVer, uint32_t& minorVer) + { + majorVer = INNRT_DEVICE_MAJOR_VERSION; + minorVer = INNRT_DEVICE_MINOR_VERSION; + return HDF_SUCCESS; + } +}; + +class SimulationDevice : public Device { +public: + explicit SimulationDevice(OHOS::sptr device) {}; + + OH_NN_ReturnCode GetDeviceName(std::string& name) override + { + name = "MockIDeviceA"; + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode GetVendorName(std::string& name) override + { + name = "MockVendorA"; + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override + { + status = DeviceStatus::AVAILABLE; + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, + std::vector& ops) override + { + return OH_NN_SUCCESS; + }; + + OH_NN_ReturnCode IsFloat16PrecisionSupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode IsPerformanceModeSupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode IsPrioritySupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode IsDynamicInputSupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode IsModelCacheSupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + + OH_NN_ReturnCode PrepareModel(std::shared_ptr model, const ModelConfig& config, + std::shared_ptr& preparedModel) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, + std::shared_ptr& preparedModel) override + 
{ + return OH_NN_SUCCESS; + }; + + void *AllocateBuffer(size_t length) override + { + return nullptr; + }; + OH_NN_ReturnCode ReleaseBuffer(const void* buffer) override + { + return OH_NN_SUCCESS; + }; +}; + +class MockIDeviceImp : public IRegisterDevice { +public: + MOCK_METHOD1(GetDeviceName, int32_t(std::string&)); + MOCK_METHOD1(GetVendorName, int32_t(std::string&)); + MOCK_METHOD1(GetDeviceType, int32_t(V1_0::DeviceType&)); + MOCK_METHOD1(GetDeviceStatus, int32_t(V1_0::DeviceStatus&)); + MOCK_METHOD2(GetSupportedOperation, int32_t(const V1_0::Model&, std::vector&)); + MOCK_METHOD1(IsFloat16PrecisionSupported, int32_t(bool&)); + MOCK_METHOD1(IsPerformanceModeSupported, int32_t(bool&)); + MOCK_METHOD1(IsPrioritySupported, int32_t(bool&)); + MOCK_METHOD1(IsDynamicInputSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModel, + int32_t(const V1_0::Model&, const V1_0::ModelConfig&, OHOS::sptr&)); + MOCK_METHOD1(IsModelCacheSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModelFromModelCache, int32_t(const std::vector&, const V1_0::ModelConfig&, + OHOS::sptr&)); + MOCK_METHOD2(AllocateBuffer, int32_t(uint32_t, V1_0::SharedBuffer&)); + MOCK_METHOD1(ReleaseBuffer, int32_t(const V1_0::SharedBuffer&)); + MOCK_METHOD2(GetVersion, int32_t(uint32_t&, uint32_t&)); +}; + +sptr IRegisterDevice::Get(bool isStub) +{ + return IRegisterDevice::Get("device_service", isStub); +} + +sptr IRegisterDevice::Get(const std::string& serviceName, bool isStub) +{ + if (isStub) { + return nullptr; + } + + sptr mockIDevice = sptr(new (std::nothrow) MockIDeviceImp()); + if (mockIDevice.GetRefPtr() == nullptr) { + LOGE("Failed to new MockIDeviceImp object."); + return nullptr; + } + + std::string deviceName = "MockIDeviceA"; + EXPECT_CALL(*((MockIDeviceImp *)mockIDevice.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_SUCCESS))); + + std::string vendorName = "MockVendorA"; + EXPECT_CALL(*((MockIDeviceImp *)mockIDevice.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_SUCCESS))); + + V1_0::DeviceStatus deviceStatus = V1_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*((MockIDeviceImp *)mockIDevice.GetRefPtr()), GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceStatus), ::testing::Return(HDF_SUCCESS))); + return mockIDevice; +} + +class DeviceRegistrarTest : public testing::Test { +public: + DeviceRegistrarTest() = default; + ~DeviceRegistrarTest() = default; +}; + +std::shared_ptr CreateDeviceObjectCallback() +{ + OHOS::sptr device = IRegisterDevice::Get(false); + EXPECT_NE(device, nullptr); + std::shared_ptr m_mockDevice = std::make_unique(device); + return m_mockDevice; +} + +std::shared_ptr CreateNullObjectCallback() +{ + return nullptr; +} + +/* * + * @tc.name: devicemanager_getalldeviceid_001 + * @tc.desc: Verify the Constructor function register object success. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(DeviceRegistrarTest, deviceregistrar_constructor_001, TestSize.Level0)
+{
+    CreateDevice creator = CreateDeviceObjectCallback;
+    std::unique_ptr<DeviceRegistrar> deviceRegister = std::make_unique<DeviceRegistrar>(creator);
+    EXPECT_NE(deviceRegister, nullptr);
+    auto &deviceManager = DeviceManager::GetInstance();
+    std::vector<size_t> idVect = deviceManager.GetAllDeviceId();
+    EXPECT_EQ((size_t)2, idVect.size());
+
+    const size_t expectDeviceId {std::hash<std::string>{}("MockDevice_MockVendor")};
+    EXPECT_EQ(expectDeviceId, idVect[1]);
+
+    const std::string expectDeviceNameA = "MockDevice";
+    std::string deviceName = "";
+    std::shared_ptr<Device> retDevice = deviceManager.GetDevice(idVect[1]);
+    retDevice->GetDeviceName(deviceName);
+    EXPECT_EQ(deviceName, expectDeviceNameA);
+
+    const std::string expectDeviceNameB = "MockDevice_MockVendor";
+    std::string queryDeviceName = deviceManager.GetDeviceName(idVect[1]);
+    EXPECT_EQ(queryDeviceName, expectDeviceNameB);
+}
+
+/* *
+ * @tc.name: deviceregistrar_constructor_002
+ * @tc.desc: Verify that the Constructor function registers an object whose creator returns nullptr, used for branch coverage.
+ * @tc.type: FUNC
+ */
+HWTEST_F(DeviceRegistrarTest, deviceregistrar_constructor_002, TestSize.Level0)
+{
+    CreateDevice creator = CreateNullObjectCallback;
+    std::unique_ptr<DeviceRegistrar> deviceRegister = std::make_unique<DeviceRegistrar>(creator);
+    EXPECT_NE(deviceRegister, nullptr);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/test/unittest/components/executor/executor_test.cpp b/test/unittest/components/executor/executor_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5d13e51600f36395b41fff685076199b18e03673
--- /dev/null
+++ b/test/unittest/components/executor/executor_test.cpp
@@ -0,0 +1,1206 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "executor_test.h"
+
+#include "common/scoped_trace.h"
+#include "frameworks/native/compilation.h"
+#include "frameworks/native/inner_model.h"
+#include "test/unittest/common/mock_idevice.h"
+
+using namespace OHOS::NeuralNetworkRuntime;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+using namespace OHOS::HDI::Nnrt::V1_0;
+using namespace OHOS::HiviewDFX;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+using NNTensorPtr = std::shared_ptr<NNTensor>;
+
+MSLITE::LiteGraph* ExecutorTest::BuildLiteGraph(const std::vector<int32_t> dim, const std::vector<int32_t> dimOut)
+{
+    MSLITE::LiteGraph* liteGraph = new (std::nothrow) MSLITE::LiteGraph();
+    if (liteGraph == nullptr) {
+        LOGE("liteGraph build failed");
+        return nullptr;
+    }
+    liteGraph->name_ = "testGraph";
+    liteGraph->input_indices_.emplace_back(0);
+    liteGraph->output_indices_.emplace_back(1);
+    const std::vector<MSLITE::QuantParam> quant_params;
+
+    for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) {
+        const std::vector<uint8_t> data(36, 1);
+        void* liteGraphTensor1 = MSLITE::MindIR_Tensor_Create(liteGraph->name_,
+            MSLITE::DATA_TYPE_FLOAT32, dim, MSLITE::FORMAT_NCHW, data, quant_params);
+        liteGraph->all_tensors_.emplace_back(liteGraphTensor1);
+    }
+
+    for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) {
+        const std::vector<uint8_t> dataOut(36, 1);
+        void* liteGraphTensor2 = MSLITE::MindIR_Tensor_Create(liteGraph->name_,
+            MSLITE::DATA_TYPE_FLOAT32, dimOut, MSLITE::FORMAT_NCHW, dataOut, quant_params);
+        liteGraph->all_tensors_.emplace_back(liteGraphTensor2);
+    }
+
+    return liteGraph;
+}
+
+OH_NN_Tensor ExecutorTest::SetTensor(OH_NN_DataType dataType, uint32_t dimensionCount, const int32_t *dimensions,
+    const OH_NN_QuantParam *quantParam, OH_NN_TensorType type)
+{
+    OH_NN_Tensor tensor;
+    tensor.dataType = dataType;
+    tensor.dimensionCount = dimensionCount;
+    tensor.dimensions = dimensions;
+    tensor.quantParam = quantParam;
+    tensor.type = type;
+
+    return tensor;
+}
+
+void ExecutorTest::SetMermory(OH_NN_Memory** &memory)
+{
+    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* const data = dataArry;
+    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
+    OH_NN_Memory* ptr = &memoryPtr;
+    memory = &ptr;
+}
+
+/*
+ * @tc.name: executor_set_input_001
+ * @tc.desc: Verify that the SetInput function returns a successful message.
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, executor_set_input_001, testing::ext::TestSize.Level0)
+{
+    const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut);
+    InnerModel innerModel;
+    innerModel.BuildFromLiteGraph(liteGraphTest);
+    Compilation compilation(&innerModel);
+    Executor executorTest(&compilation);
+
+    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
+    void* buffer = m_dataArry;
+    size_t length = 9 * sizeof(float);
+
+    OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/*
+ * @tc.name: executor_set_input_002
+ * @tc.desc: Verify that the SetInput function returns a failed message with out-of-range index.
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_003 + * @tc.desc: Verify that the SetInput function returns a failed message with dynamic shape. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + const int dim = -1; + m_dimensionCount = 1; + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, &dim, nullptr, OH_NN_TENSOR); + size_t length = 1 * sizeof(float); + float data = 0; + void* buffer = &data; + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_004 + * @tc.desc: Verify that the SetInput function returns a failed message with invalid tensor's dataType. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_004, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_INT64, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_005 + * @tc.desc: Verify that the SetInput function returns a failed message with invalid length. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_005, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + size_t length = 1 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_006 + * @tc.desc: Verify that the SetInput function returns a failed message with allocating buffer is unsuccessfully. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_006, testing::ext::TestSize.Level0) +{ + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_set_input_007 + * @tc.desc: Verify that the SetInput function returns a failed message with empty buffer. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_007, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = nullptr; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_set_input_008 + * @tc.desc: Verify that the SetInput function returns a successful message with dataLength <= curBufferLength. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_008, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + float dataArry[15] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}; + void* buffer = dataArry; + size_t length = 9 * sizeof(float); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + + float expectArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* expectBuffer = expectArry; + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, expectBuffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_input_009 + * @tc.desc: Verify that the SetInput function returns a failed message with length less than dataLength. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_009, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInputFromMemory(m_index, tensor, memory)); + + float expectData = 0; + void* buffer = &expectData; + size_t length = 1 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_010 + * @tc.desc: Verify that the SetInput function returns a failed message with BuildFromOHNNTensor unsuccessfully. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_010, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_dimensionCount = 0; + OH_NN_Tensor tensor = SetTensor(OH_NN_UNKNOWN, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_011 + * @tc.desc: Verify that the SetInput function returns a successful message with dataLength <= curBufferLength. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_011, testing::ext::TestSize.Level0) +{ + const std::vector expectDim = {3, -1}; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(expectDim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + + const int32_t testDim[2] = {3, 5}; + OH_NN_Tensor expectTensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, testDim, nullptr, OH_NN_TENSOR); + size_t expectLength = 15 * sizeof(float); + float expectArry[15] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}; + void* expectBuffer = expectArry; + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, expectTensor, expectBuffer, expectLength); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_001 + * @tc.desc: Verify that the SetInputFromMemory function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_002 + * @tc.desc: Verify that the SetInputFromMemory function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_003 + * @tc.desc: Verify that the SetInputFromMemory function returns a failed message with dynamic shape. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + const int dim = -1; + OH_NN_Tensor tensor; + tensor.dataType = OH_NN_FLOAT32; + tensor.dimensionCount = 1; + tensor.dimensions = &dim; + tensor.quantParam = nullptr; + tensor.type = OH_NN_TENSOR; + float value = 0; + void* const data = &value; + OH_NN_Memory memory = {data, 1 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_004 + * @tc.desc: Verify that the SetInputFromMemory function returns a failed message with invalid tensor's dataType. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_004, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_INT64, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_005 + * @tc.desc: Verify that the SetInput function returns a failed message with invalid memory.length. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_005, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 1 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_001 + * @tc.desc: Verify that the SetOutput function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_output_002 + * @tc.desc: Verify that the SetOutput function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_003 + * @tc.desc: Verify that the SetOutput function returns a failed message with invalid length. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 2 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_004 + * @tc.desc: Verify that the SetOutput function returns a failed message with allocating buffer is failed. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_004, testing::ext::TestSize.Level0) +{ + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_set_output_005 + * @tc.desc: Verify that the SetOutput function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_005, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutputFromMemory(m_index, memory)); + + size_t length = 1 * sizeof(float); + float expectData = 0; + void* buffer = &expectData; + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_006 + * @tc.desc: Verify that the SetOutput function returns a successful message with length <= curBufferLength. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_006, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + + float expectDataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* expectBuffer = expectDataArry; + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, expectBuffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_output_007 + * @tc.desc: Verify that the SetOutput function returns a successful message with length > curBufferLength. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_007, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + + size_t expectLength = 15 * sizeof(float); + float expectDataArry[15] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}; + void* expectBuffer = expectDataArry; + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, expectBuffer, expectLength); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_001 + * @tc.desc: Verify that the SetOutputFromMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_002 + * @tc.desc: Verify that the SetOutputFromMemory function returns a failed message with out-of-range index. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_003 + * @tc.desc: Verify that the SetOutputFromMemory function returns a failed message with invalid memory.length. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 0}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_004 + * @tc.desc: Verify that the SetOutputFromMemory function returns a failed message with memory.length < dataLength. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_004, testing::ext::TestSize.Level0) +{ + const std::vector expectDim = {4, 4}; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, expectDim); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_005 + * @tc.desc: Verify that the SetOutputFromMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_005, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_get_output_dimensions_001 + * @tc.desc: Verify that the GetOutputShape function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_get_output_dimensions_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.Run()); + + int32_t expectDim[2] = {3, 3}; + int32_t* ptr = expectDim; + int32_t** dimensions = &ptr; + uint32_t dimensionCount = 2; + + OH_NN_ReturnCode ret = executorTest.GetOutputShape(m_index, dimensions, dimensionCount); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_get_output_dimensions_002 + * @tc.desc: Verify that the GetOutputShape function returns a failed message without run. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_get_output_dimensions_002, testing::ext::TestSize.Level0) +{ + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + + int32_t testDim[2] = {3, 3}; + int32_t* ptr = testDim; + int32_t** dimensions = &ptr; + uint32_t dimensionCount = 2; + + OH_NN_ReturnCode ret = executorTest.GetOutputShape(m_index, dimensions, dimensionCount); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: executor_get_output_dimensions_003 + * @tc.desc: Verify that the GetOutputShape function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_get_output_dimensions_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.Run()); + + uint32_t testIndex = 6; + int32_t testDim[2] = {3, 3}; + int32_t* ptr = testDim; + int32_t** dimensions = &ptr; + uint32_t dimensionCount = 2; + + OH_NN_ReturnCode ret = executorTest.GetOutputShape(testIndex, dimensions, dimensionCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_create_input_memory_001 + * @tc.desc: Verify that the CreateInputMemory function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_input_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.CreateInputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_create_input_memory_002 + * @tc.desc: Verify that the CreateInputMemory function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_input_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + m_index = 6; + OH_NN_ReturnCode ret = executorTest.CreateInputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_create_input_memory_003 + * @tc.desc: Verify that the CreateInputMemory function returns a failed message with allocating buffer unsuccessfully. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_input_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.CreateInputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_destroy_input_memory_001 + * @tc.desc: Verify that the DestroyInputMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_input_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)}; + OH_NN_Memory* ptr = &memoryPtr; + OH_NN_Memory** memory = &ptr; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateInputMemory(m_index, length, memory)); + OH_NN_ReturnCode ret = executorTest.DestroyInputMemory(m_index, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_destroy_input_memory_002 + * @tc.desc: Verify that the DestroyInputMemory function returns a failed message with out-of-range index. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_input_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + uint32_t testIndex = 6; + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateInputMemory(m_index, length, memory)); + OH_NN_ReturnCode ret = executorTest.DestroyInputMemory(testIndex, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_destroy_input_memory_003 + * @tc.desc: Verify that the DestroyInputMemory function returns a failed message without creating memory. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_input_memory_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.DestroyInputMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_destroy_input_memory_004 + * @tc.desc: Verify that the DestroyInputMemory function returns a failed message with invalid memory.data. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_input_memory_004, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateInputMemory(m_index, length, memory)); + + float arry[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; + void* const expectData = arry; + OH_NN_Memory mptr = {expectData, 9 * sizeof(float)}; + OH_NN_Memory* expectPtr = &mptr; + OH_NN_Memory** expectMemory = &expectPtr; + + OH_NN_ReturnCode ret = executorTest.DestroyInputMemory(m_index, expectMemory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_create_output_memory_001 + * @tc.desc: Verify that the CreateOutputMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_output_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.CreateOutputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_create_output_memory_002 + * @tc.desc: Verify that the CreateOutputMemory function returns a failed message with out-of-range index. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_output_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.CreateOutputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_create_output_memory_003 + * @tc.desc: Verify that the CreateOutputMemory function returns a failed message with allocating buffer unsuccessfully. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_output_memory_003, testing::ext::TestSize.Level0) +{ + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.CreateOutputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_destroy_output_memory_001 + * @tc.desc: Verify that the DestroyOutputMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_output_memory_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateOutputMemory(m_index, length, memory)); + OH_NN_ReturnCode ret = executorTest.DestroyOutputMemory(m_index, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_destroy_output_memory_002 + * @tc.desc: Verify that the DestroyOutputMemory function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_output_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + uint32_t testIndex = 6; + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateOutputMemory(m_index, length, memory)); + OH_NN_ReturnCode ret = executorTest.DestroyOutputMemory(testIndex, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_destroy_output_memory_003 + * @tc.desc: Verify that the DestroyOutputMemory function returns a failed message without creating memory. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_output_memory_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.DestroyOutputMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_destroy_output_memory_004 + * @tc.desc: Verify that the DestroyOutputMemory function returns a failed message with invalid memory.data. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_output_memory_004, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateOutputMemory(m_index, length, memory)); + + float arry[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; + void* const expectData = arry; + OH_NN_Memory mptr = {expectData, 9 * sizeof(float)}; + OH_NN_Memory* expectPtr = &mptr; + OH_NN_Memory** expectMemory = &expectPtr; + + OH_NN_ReturnCode ret = executorTest.DestroyOutputMemory(m_index, expectMemory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_run_test_001 + * @tc.desc: Verify that the Run function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_run_test_001, testing::ext::TestSize.Level0) +{ + HiviewDFX::HiTraceId traceId = HiTraceChain::Begin("executor_run_test_001", HITRACE_FLAG_TP_INFO); + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + OH_NN_ReturnCode ret = executorTest.Run(); + HiTraceChain::End(traceId); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_run_test_002 + * @tc.desc: Verify that the DestroyOutputMemory function returns a failed message without SetInput. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_run_test_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_ReturnCode ret = executorTest.Run(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_run_test_003 + * @tc.desc: Verify that the DestroyOutputMemory function returns a failed message without SetOutput. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_run_test_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + OH_NN_ReturnCode ret = executorTest.Run(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_run_test_004 + * @tc.desc: Verify that the DestroyOutputMemory function returns a failed message with failed executionPlan.Run. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_run_test_004, testing::ext::TestSize.Level0) +{ + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + OH_NN_ReturnCode ret = executorTest.Run(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/components/executor/executor_test.h b/test/unittest/components/executor/executor_test.h new file mode 100644 index 0000000000000000000000000000000000000000..05837b5bfe3895c89b5651432fceffbb812192a1 --- /dev/null +++ b/test/unittest/components/executor/executor_test.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef NEURAL_NETWORK_RUNTIME_EXECUTOR_UNITTEST_H
+#define NEURAL_NETWORK_RUNTIME_EXECUTOR_UNITTEST_H
+
+#include <gtest/gtest.h>
+
+#include "mindir.h"
+
+#include "frameworks/native/executor.h"
+
+namespace MSLITE = mindspore::lite;
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class ExecutorTest : public testing::Test {
+public:
+    MSLITE::LiteGraph* BuildLiteGraph(const std::vector<int32_t> dim, const std::vector<int32_t> dimOut);
+    OH_NN_Tensor SetTensor(OH_NN_DataType dataType, uint32_t dimensionCount, const int32_t *dimensions,
+        const OH_NN_QuantParam *quantParam, OH_NN_TensorType type);
+    void SetMermory(OH_NN_Memory** &memory);
+
+public:
+    uint32_t m_index {0};
+    const std::vector<int32_t> m_dim {3, 3};
+    const std::vector<int32_t> m_dimOut {3, 3};
+    const int32_t m_dimArry[2] {3, 3};
+    uint32_t m_dimensionCount {2};
+    float m_dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+};
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
+
+#endif // NEURAL_NETWORK_RUNTIME_EXECUTOR_UNITTEST_H
\ No newline at end of file
diff --git a/test/unittest/components/hdi_device/hdi_device_test.cpp b/test/unittest/components/hdi_device/hdi_device_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..07925bf69aa8d1dd60eed179007006240fe43a05
--- /dev/null
+++ b/test/unittest/components/hdi_device/hdi_device_test.cpp
@@ -0,0 +1,875 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "frameworks/native/hdi_device.h" +#include "test/unittest/common/mock_idevice.h" +#include "test/unittest/common/file_utils.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace mindspore { +namespace lite { +OHOS::HDI::Nnrt::V1_0::Model* MindIR_LiteGraph_To_Model(const LiteGraph* lite_graph, + const OHOS::HDI::Nnrt::V1_0::SharedBuffer& buffer) +{ + return new (std::nothrow) OHOS::HDI::Nnrt::V1_0::Model(); +} + +void MindIR_Model_Destroy(OHOS::HDI::Nnrt::V1_0::Model** model) +{ + if ((model != nullptr) && (*model != nullptr)) { + delete *model; + *model = nullptr; + } +} + +size_t MindIR_LiteGraph_GetConstTensorSize(const mindspore::lite::LiteGraph* lite_graph) +{ + return 1; +} +} +} + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class HDIDeviceTest : public testing::Test { +protected: + void GetBuffer(void*& buffer, size_t length); + OH_NN_ReturnCode PrepareModel(int32_t allocBufferType, int32_t prepareType); +}; + +void HDIDeviceTest::GetBuffer(void*& buffer, size_t length) +{ + std::string data = "ABCD"; + const size_t dataLength = 100; + data.resize(dataLength, '+'); + + std::string filename = "/data/log/memory-001.dat"; + FileUtils fileUtils(filename); + fileUtils.WriteFile(data); + + int fd = open(filename.c_str(), O_RDWR); + EXPECT_NE(fd, -1); + + const auto &memoryManager = MemoryManager::GetInstance(); + buffer = memoryManager->MapMemory(fd, length); + EXPECT_NE(buffer, nullptr); + + const char* result = static_cast(buffer); + int index = 0; + EXPECT_EQ('A', result[index++]); + EXPECT_EQ('B', result[index++]); + EXPECT_EQ('C', result[index++]); + EXPECT_EQ('D', result[index++]); + close(fd); +} + +OH_NN_ReturnCode HDIDeviceTest::PrepareModel(int32_t allocBufferType, int32_t prepareType) +{ + std::shared_ptr model = std::make_shared(); + OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr hdiDevice = std::make_unique(sp); + EXPECT_NE(hdiDevice, nullptr); + + V1_0::SharedBuffer buffer {1, 1, 0, 1}; + EXPECT_CALL(*sp, AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(allocBufferType))); + + std::shared_ptr preparedModel; + const int position = 2; + OHOS::sptr iPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_CALL(*sp, PrepareModel(::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee(iPreparedModel), + ::testing::Return(prepareType))); + + ModelConfig config; + OH_NN_ReturnCode result = hdiDevice->PrepareModel(model, config, preparedModel); + return result; +} + +/* * + * @tc.name: hdidevice_constructor_001 + * @tc.desc: Verify the Constructor function return object success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_constructor_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + EXPECT_NE(device, nullptr); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); +} + +/* * + * @tc.name: hdidevice_getdevicename_001 + * @tc.desc: Verify the GetDeviceName function validate device name success. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicename_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string deviceName = "MockDevice"; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_SUCCESS))); + + const std::string expectDeviceName = "MockDevice"; + std::string newDeviceName = ""; + OH_NN_ReturnCode result = hdiDevice->GetDeviceName(newDeviceName); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceName, newDeviceName); +} + +/* * + * @tc.name: hdidevice_getdevicename_002 + * @tc.desc: Verify the GetDeviceName function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicename_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string deviceName = "MockDevice"; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetDeviceName(deviceName); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_getvendorname_001 + * @tc.desc: Verify the GetVendorName function validate vendor name success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string vendorName = "MockVendor"; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_SUCCESS))); + + const std::string expectDeviceName = "MockVendor"; + std::string newVendorName = ""; + OH_NN_ReturnCode result = hdiDevice->GetVendorName(newVendorName); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceName, newVendorName); +} + +/* * + * @tc.name: hdidevice_getvendorname_002 + * @tc.desc: Verify the GetVendorName function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string vendorName = "MockVendor"; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetVendorName(vendorName); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_getdevicetype_001 + * @tc.desc: Verify the GetDeviceType function validate device type success. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + V1_0::DeviceType iDeviceType = V1_0::DeviceType::CPU; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceType), ::testing::Return(HDF_SUCCESS))); + + OH_NN_DeviceType expectDeviceType = OH_NN_CPU; + OH_NN_DeviceType newDeviceType = OH_NN_CPU; + OH_NN_ReturnCode result = hdiDevice->GetDeviceType(newDeviceType); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceType, newDeviceType); +} + +/* * + * @tc.name: hdidevice_getdevicetype_002 + * @tc.desc: Verify the GetDeviceType function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + OH_NN_DeviceType deviceType = OH_NN_CPU; + V1_0::DeviceType iDeviceType = V1_0::DeviceType::CPU; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceType), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetDeviceType(deviceType); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_getdevicestatus_001 + * @tc.desc: Verify the GetDeviceStatus function validate device status success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceStatus), ::testing::Return(HDF_SUCCESS))); + + const DeviceStatus expectDeviceStatus = AVAILABLE; + DeviceStatus newDeviceStatus = AVAILABLE; + OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(newDeviceStatus); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceStatus, newDeviceStatus); +} + +/* * + * @tc.name: hdidevice_getdevicestatus_002 + * @tc.desc: Verify the GetDeviceStatus function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + DeviceStatus deviceStatus = AVAILABLE; + V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceStatus), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(deviceStatus); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_getsupportedoperation_001 + * @tc.desc: Verify the GetSupportedOperation function return success. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_001, TestSize.Level0)
+{
+    std::vector<bool> ops {true};
+    std::shared_ptr<mindspore::lite::LiteGraph> model = std::make_shared<mindspore::lite::LiteGraph>();
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDevice> hdiDevice = std::make_unique<HDIDevice>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    V1_0::SharedBuffer buffer {1, 1, 0, 1};
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_SUCCESS)));
+
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetSupportedOperation(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(ops), ::testing::Return(HDF_SUCCESS)));
+
+    std::vector<bool> newOps {true};
+    const std::vector<bool> expectOps {true};
+    OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, newOps);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+    auto expectOpsSize = expectOps.size();
+    for (size_t i = 0; i < expectOpsSize; ++i) {
+        EXPECT_EQ(expectOps[i], newOps[i]);
+    }
+}
+
+/* *
+ * @tc.name: hdidevice_getsupportedoperation_002
+ * @tc.desc: Verify the GetSupportedOperation function return failed in case of allocate buffer failure.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_002, TestSize.Level0)
+{
+    std::vector<bool> ops;
+    std::shared_ptr<mindspore::lite::LiteGraph> model = std::make_shared<mindspore::lite::LiteGraph>();
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDevice> hdiDevice = std::make_unique<HDIDevice>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    V1_0::SharedBuffer buffer {1, 1, 0, 1};
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_FAILURE)));
+
+    OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, ops);
+    EXPECT_EQ(OH_NN_FAILED, result);
+}
+
+/* *
+ * @tc.name: hdidevice_getsupportedoperation_003
+ * @tc.desc: Verify the GetSupportedOperation function return null pointer error in case of nullptr model.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_003, TestSize.Level0)
+{
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDevice> hdiDevice = std::make_unique<HDIDevice>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::shared_ptr<mindspore::lite::LiteGraph> model = nullptr;
+    std::vector<bool> ops;
+    OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, ops);
+    EXPECT_EQ(OH_NN_NULL_PTR, result);
+}
+
+/* *
+ * @tc.name: hdidevice_getsupportedoperation_004
+ * @tc.desc: Verify the GetSupportedOperation function return unavailable device.
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_004, TestSize.Level0) +{ + std::vector ops {true}; + std::shared_ptr model = std::make_shared(); + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V1_0::SharedBuffer buffer {2, 1, 0, 1}; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_SUCCESS))); + + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetSupportedOperation(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(ops), ::testing::Return(HDF_FAILURE))); + + std::vector newOps {true}; + OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, newOps); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_isfloat16precisionsupported_001 + * @tc.desc: Verify the IsFloat16PrecisionSupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isfloat16precisionsupported_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsFloat16PrecisionSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + OH_NN_ReturnCode result = hdiDevice->IsFloat16PrecisionSupported(isSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); +} + +/* * + * @tc.name: hdidevice_isfloat16precisionsupported_002 + * @tc.desc: Verify the IsFloat16PrecisionSupported function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isfloat16precisionsupported_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsFloat16PrecisionSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsFloat16PrecisionSupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_isperformancemodesupported_001 + * @tc.desc: Verify the IsPerformanceModeSupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isperformancemodesupported_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsPerformanceModeSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + + bool newIsSupported = false; + const bool expectIsSupported = false; + OH_NN_ReturnCode result = hdiDevice->IsPerformanceModeSupported(newIsSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectIsSupported, newIsSupported); +} + +/* * + * @tc.name: hdidevice_isperformancemodesupported_002 + * @tc.desc: Verify the IsPerformanceModeSupported function return unavailable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isperformancemodesupported_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsPerformanceModeSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsPerformanceModeSupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_isprioritysupported_001 + * @tc.desc: Verify the IsPrioritySupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isprioritysupported_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsPrioritySupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + + bool newIsSupported = false; + bool expectIsSupported = false; + OH_NN_ReturnCode result = hdiDevice->IsPrioritySupported(newIsSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(newIsSupported, expectIsSupported); +} + +/* * + * @tc.name: hdidevice_isprioritysupported_002 + * @tc.desc: Verify the IsPrioritySupported function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isprioritysupported_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsPrioritySupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsPrioritySupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_isdynamicinputsupported_001 + * @tc.desc: Verify the IsDynamicInputSupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isdynamicinputsupported_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsDynamicInputSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + + bool newIsSupported = false; + bool expectIsSupported = false; + OH_NN_ReturnCode result = hdiDevice->IsDynamicInputSupported(newIsSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(newIsSupported, expectIsSupported); +} + +/* * + * @tc.name: hdidevice_isdynamicinputsupported_002 + * @tc.desc: Verify the IsDynamicInputSupported function return unavailable device. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_isdynamicinputsupported_002, TestSize.Level0)
+{
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDevice> hdiDevice = std::make_unique<HDIDevice>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    bool isSupported = false;
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsDynamicInputSupported(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE)));
+    OH_NN_ReturnCode result = hdiDevice->IsDynamicInputSupported(isSupported);
+    EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result);
+}
+
+/* *
+ * @tc.name: hdidevice_ismodelcachesupported_001
+ * @tc.desc: Verify the IsModelCacheSupported function return success.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_ismodelcachesupported_001, TestSize.Level0)
+{
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDevice> hdiDevice = std::make_unique<HDIDevice>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    bool isSupported = false;
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsModelCacheSupported(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS)));
+
+    bool newIsSupported = false;
+    bool expectIsSupported = false;
+    OH_NN_ReturnCode result = hdiDevice->IsModelCacheSupported(newIsSupported);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+    EXPECT_EQ(expectIsSupported, newIsSupported);
+}
+
+/* *
+ * @tc.name: hdidevice_ismodelcachesupported_002
+ * @tc.desc: Verify the IsModelCacheSupported function return unavailable device.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_ismodelcachesupported_002, TestSize.Level0)
+{
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDevice> hdiDevice = std::make_unique<HDIDevice>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    bool isSupported = false;
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsModelCacheSupported(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE)));
+    OH_NN_ReturnCode result = hdiDevice->IsModelCacheSupported(isSupported);
+    EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result);
+}
+
+/* *
+ * @tc.name: hdidevice_preparemodel_001
+ * @tc.desc: Verify the PrepareModel function return success.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_001, TestSize.Level0)
+{
+    int32_t allocBufferType = HDF_SUCCESS;
+    int32_t prepareType = HDF_SUCCESS;
+    OH_NN_ReturnCode result = PrepareModel(allocBufferType, prepareType);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
+/* *
+ * @tc.name: hdidevice_preparemodel_002
+ * @tc.desc: Verify the PrepareModel function return invalid parameter.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_002, TestSize.Level0)
+{
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDevice> hdiDevice = std::make_unique<HDIDevice>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::shared_ptr<mindspore::lite::LiteGraph> model = nullptr;
+    ModelConfig config;
+    std::shared_ptr<PreparedModel> preparedModel;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModel(model, config, preparedModel);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/* *
+ * @tc.name: hdidevice_preparemodel_003
+ * @tc.desc: Verify the PrepareModel function return failed in case of prepare model failure.
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_003, TestSize.Level0) +{ + int32_t allocBufferType = HDF_SUCCESS; + int32_t prepareType = HDF_FAILURE; + OH_NN_ReturnCode result = PrepareModel(allocBufferType, prepareType); + EXPECT_EQ(OH_NN_FAILED, result); +} + +/* * + * @tc.name: hdidevice_preparemodel_004 + * @tc.desc: Verify the PrepareModel function return failed. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_004, TestSize.Level0) +{ + int32_t allocBufferType = HDF_FAILURE; + int32_t prepareType = HDF_FAILURE; + OH_NN_ReturnCode result = PrepareModel(allocBufferType, prepareType); + EXPECT_EQ(OH_NN_FAILED, result); +} + +/* * + * @tc.name: hdidevice_preparemodelfrommodelcache_001 + * @tc.desc: Verify the PrepareModelFromModelCache function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_001, TestSize.Level0) +{ + size_t length = 100; + void *buffer = nullptr; + GetBuffer(buffer, length); + + std::vector modelCache = { { buffer, 100 } }; + ModelConfig config; + + OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr hdiDevice = std::make_unique(sp); + EXPECT_NE(hdiDevice, nullptr); + + std::shared_ptr preparedModel; + + OHOS::sptr iPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_SUCCESS))); + + OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel); + const auto &memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); + EXPECT_EQ(OH_NN_SUCCESS, result); +} + +/* * + * @tc.name: hdidevice_preparemodelfrommodelcache_002 + * @tc.desc: Verify the PrepareModelFromModelCache function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_002, TestSize.Level0) +{ + size_t length = 100; + void *buffer = nullptr; + GetBuffer(buffer, length); + + OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr hdiDevice = std::make_unique(sp); + EXPECT_NE(hdiDevice, nullptr); + + std::vector modelCache = { { buffer, 100 } }; + ModelConfig config; + OHOS::sptr preModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(preModel, nullptr); + + std::shared_ptr preparedModel = std::make_shared(preModel); + + OHOS::sptr iPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel); + EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_FAILURE))); + + OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_preparemodelfrommodelcache_003 + * @tc.desc: Verify the PrepareModelFromModelCache function return nullptr. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_003, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + std::vector modelCache = { { nullptr, 0 } }; + ModelConfig config; + std::shared_ptr preparedModel; + OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel); + EXPECT_EQ(OH_NN_NULL_PTR, result); +} + +/* * + * @tc.name: hdidevice_allocatebuffer_001 + * @tc.desc: Verify the AllocateBuffer function return nullptr. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V1_0::SharedBuffer buffer; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_FAILURE))); + + size_t length = 8; + void *result = hdiDevice->AllocateBuffer(length); + EXPECT_EQ(nullptr, result); + hdiDevice->ReleaseBuffer(result); +} + +/* * + * @tc.name: hdidevice_allocatebuffer_002 + * @tc.desc: Verify the AllocateBuffer function return nullptr and HDF_FAILURE. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + size_t length = 8; + void *result = hdiDevice->AllocateBuffer(length); + EXPECT_EQ(nullptr, result); + hdiDevice->ReleaseBuffer(result); +} + +/* * + * @tc.name: hdidevice_allocatebuffer_003 + * @tc.desc: Verify the AllocateBuffer function return nullptr in case of 0 size. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_003, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + size_t length = 0; + void *result = hdiDevice->AllocateBuffer(length); + EXPECT_EQ(nullptr, result); +} + +/* * + * @tc.name: hdidevice_releasebuffer_001 + * @tc.desc: Verify the ReleaseBuffer function validate buffer success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_001, TestSize.Level0) +{ + size_t length = 100; + void *buffer = nullptr; + GetBuffer(buffer, length); + + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_)) + .WillRepeatedly(::testing::Return(HDF_SUCCESS)); + + EXPECT_NE(hdiDevice, nullptr); + hdiDevice->ReleaseBuffer(buffer); + const auto &memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} + +/* * + * @tc.name: hdidevice_releasebuffer_002 + * @tc.desc: Verify the ReleaseBuffer function validate AllocateBuffer return nullptr. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V1_0::SharedBuffer sharedbuffer; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(sharedbuffer), ::testing::Return(HDF_FAILURE))); + + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_)) + .WillRepeatedly(::testing::Return(HDF_FAILURE)); + + size_t length = 8; + void *buffer = hdiDevice->AllocateBuffer(length); + hdiDevice->ReleaseBuffer(buffer); +} + +/* * + * @tc.name: hdidevice_releasebuffer_003 + * @tc.desc: Verify the ReleaseBuffer function validate param buffer is nullptr. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_003, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + void *buffer = nullptr; + hdiDevice->ReleaseBuffer(buffer); +} + +/* * + * @tc.name: hdidevice_releasebuffer_004 + * @tc.desc: Verify the ReleaseBuffer function validate invalid buffer. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_004, TestSize.Level0) +{ + const size_t length = 100; + auto* buffer = new(std::nothrow) char[length]; + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + hdiDevice->ReleaseBuffer(buffer); + delete[] buffer; + buffer = nullptr; +} + +/* * + * @tc.name: hdidevice_releasebuffer_005 + * @tc.desc: Verify the ReleaseBuffer function validate moc object's ReleaseBuffer return failure. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_005, TestSize.Level0) +{ + size_t length = 100; + void *buffer = nullptr; + GetBuffer(buffer, length); + + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_)) + .WillRepeatedly(::testing::Return(HDF_FAILURE)); + + hdiDevice->ReleaseBuffer(buffer); + const auto &memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/hdi_prepared_model/hdi_prepared_model_test.cpp b/test/unittest/components/hdi_prepared_model/hdi_prepared_model_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d946b6312390730ad7e02d588ad78aab8df8bcea --- /dev/null +++ b/test/unittest/components/hdi_prepared_model/hdi_prepared_model_test.cpp @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include +#include + +#include "common/log.h" +#include "frameworks/native/hdi_prepared_model.h" +#include "frameworks/native/memory_manager.h" +#include "frameworks/native/transform.h" +#include "test/unittest/common/mock_idevice.h" +#include "test/unittest/common/file_utils.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class HDIPreparedModelTest : public testing::Test { +protected: + void GetBuffer(void*& buffer, size_t length); + void InitTensor(std::vector& inputs, void* buffer, size_t length); + OH_NN_ReturnCode Run(std::vector& inputs); +}; + +void HDIPreparedModelTest::GetBuffer(void*& buffer, size_t length) +{ + std::string data = "ABCD"; + const size_t dataLength = 100; + data.resize(dataLength, '-'); + + std::string filename = "/data/log/memory-001.dat"; + FileUtils fileUtils(filename); + fileUtils.WriteFile(data); + + int fd = open(filename.c_str(), O_RDWR); + EXPECT_NE(-1, fd); + + const auto& memoryManager = MemoryManager::GetInstance(); + buffer = memoryManager->MapMemory(fd, length); + close(fd); +} + +void HDIPreparedModelTest::InitTensor(std::vector& inputs, void* buffer, size_t length) +{ + IOTensor inputTensor; + inputTensor.dataType = OH_NN_INT8; + inputTensor.dataType = OH_NN_INT8; + inputTensor.format = OH_NN_FORMAT_NCHW; + inputTensor.data = buffer; + inputTensor.length = length; + inputs.emplace_back(std::move(inputTensor)); +} + +OH_NN_ReturnCode HDIPreparedModelTest::Run(std::vector& inputs) +{ + const int vvPosition = 2; + const int vPosition = 3; + std::vector outputs; + std::vector> outputsDims {{0}}; + std::vector isOutputBufferEnough {}; + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + EXPECT_CALL(*sp, Run(::testing::_, ::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll( + ::testing::SetArgReferee(outputsDims), + ::testing::SetArgReferee(isOutputBufferEnough), + ::testing::Return(HDF_SUCCESS)) + ); + + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + return result; +} + +/** + * @tc.name: hidpreparedmodel_constructor_001 + * @tc.desc: Verify the Constructor function validate constructor success. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_constructor_001, TestSize.Level0) +{ + OHOS::sptr hdiPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(hdiPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + EXPECT_NE(preparedModel, nullptr); +} + +/** + * @tc.name: hidpreparedmodel_exportmodelcache_001 + * @tc.desc: Verify the ExportModelCache function return memory error. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_001, TestSize.Level0) +{ + std::vector bufferVect = {{100, 100, 0, 100}}; + OHOS::sptr hdiPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + std::vector modelCache; + EXPECT_CALL(*((V1_0::MockIPreparedModel*)hdiPreparedModel.GetRefPtr()), + ExportModelCache(::testing::_)) + .WillRepeatedly( + ::testing::DoAll( + ::testing::SetArgReferee<0>(bufferVect), + ::testing::Return(HDF_SUCCESS) + ) + ); + + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_MEMORY_ERROR, result); +} + +/** + * @tc.name: hidpreparedmodel_exportmodelcache_002 + * @tc.desc: Verify the ExportModelCache function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_002, TestSize.Level0) +{ + std::vector bufferVect; + OHOS::sptr mockPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(mockPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); + std::vector modelCache; + EXPECT_CALL(*((V1_0::MockIPreparedModel*)mockPreparedModel.GetRefPtr()), + ExportModelCache(::testing::_)) + .WillRepeatedly( + ::testing::DoAll( + ::testing::SetArgReferee<0>(bufferVect), + ::testing::Return(HDF_SUCCESS) + ) + ); + + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_SUCCESS, result); +} + +/** + * @tc.name: hidpreparedmodel_exportmodelcache_003 + * @tc.desc: Verify the ExportModelCache function return invalid parameter. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_003, TestSize.Level0) +{ + OHOS::sptr hdiPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(hdiPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + std::vector modelCache {{nullptr, 0}}; + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_exportmodelcache_004 + * @tc.desc: Verify the ExportModelCache function return unvailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_004, TestSize.Level0) +{ + std::vector bufferVect = {{100, 100, 0, 100}}; + OHOS::sptr mockPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(mockPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); + std::vector modelCache; + EXPECT_CALL(*((V1_0::MockIPreparedModel*)mockPreparedModel.GetRefPtr()), + ExportModelCache(::testing::_)) + .WillRepeatedly( + ::testing::DoAll( + ::testing::SetArgReferee<0>(bufferVect), + ::testing::Return(HDF_FAILURE) + ) + ); + + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/** + * @tc.name: hidpreparedmodel_run_001 + * @tc.desc: Verify the Run function return invalid parameter. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_001, TestSize.Level0) +{ + IOTensor inputTensor; + inputTensor.dataType = OH_NN_INT8; + + IOTensor outputTensor; + outputTensor.dataType = OH_NN_INT8; + std::vector inputs; + inputs.emplace_back(std::move(inputTensor)); + std::vector outputs; + + std::vector iOutputTensors; + V1_0::IOTensor iTensor; + iOutputTensors.emplace_back(iTensor); + std::vector> outputsDims {{0}}; + std::vector isOutputBufferEnough {}; + + std::shared_ptr sp = std::make_shared(); + OHOS::sptr hdiPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(hdiPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_002 + * @tc.desc: Verify the Run function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_002, TestSize.Level0) +{ + const size_t length = 100; + void* buffer = nullptr; + GetBuffer(buffer, length); + + std::vector inputs; + std::vector outputs; + InitTensor(inputs, buffer, length); + + OH_NN_ReturnCode result = Run(inputs); + EXPECT_EQ(OH_NN_SUCCESS, result); + const auto& memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} + +/** + * @tc.name: hidpreparedmodel_run_003 + * @tc.desc: Verify the Run function return unavailable device in case of run failure. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_003, TestSize.Level0) +{ + const size_t length = 100; + void* buffer = nullptr; + GetBuffer(buffer, length); + + std::vector inputs; + std::vector outputs; + InitTensor(inputs, buffer, length); + + std::vector> outputsDims {}; + std::vector isOutputBufferEnough {}; + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + + EXPECT_CALL(*sp, Run(::testing::_, ::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly( + ::testing::DoAll( + ::testing::SetArgReferee<2>(outputsDims), + ::testing::SetArgReferee<3>(isOutputBufferEnough), + ::testing::Return(HDF_FAILURE) + ) + ); + + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); + const auto& memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} + +/** + * @tc.name: hidpreparedmodel_run_004 + * @tc.desc: Verify the Run function return invalid parameter. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_004, TestSize.Level0) +{ + std::vector inputs; + InitTensor(inputs, nullptr, 0); + OH_NN_ReturnCode result = Run(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_005 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_005, TestSize.Level0) +{ + const size_t length = 100; + void* buffer = nullptr; + GetBuffer(buffer, length); + + std::vector inputs; + std::vector outputs; + InitTensor(inputs, buffer, length); + InitTensor(outputs, nullptr, 0); + + std::vector> outputsDims {}; + std::vector isOutputBufferEnough {}; + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); + const auto& memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/inner_model/inner_model_test.cpp b/test/unittest/components/inner_model/inner_model_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e40c0422576273d205f66e960a48f00d4f11c3f7 --- /dev/null +++ b/test/unittest/components/inner_model/inner_model_test.cpp @@ -0,0 +1,825 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include "common/utils.h" +#include "common/log.h" +#include "frameworks/native/nn_tensor.h" +#include "frameworks/native/inner_model.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; + +namespace NNRT { +namespace UnitTest { +class InnerModelTest : public testing::Test { +public: + void SetLiteGraph(mindspore::lite::LiteGraph* liteGraph); + void SetTensors(); + void SetIndices(); + +public: + InnerModel m_innerModelTest; + + std::vector m_dimInput{3, 3}; + std::vector m_dimOutput{3, 3}; + std::vector m_inputIndices{0}; + std::vector m_outputIndices{1}; + + OH_NN_OperationType m_opType{OH_NN_OPS_ADD}; + + OH_NN_UInt32Array m_inputs; + OH_NN_UInt32Array m_outputs; + OH_NN_UInt32Array m_params; + + uint32_t m_paramIndexs[1]{3}; + uint32_t m_inputIndexs[2]{0, 1}; + uint32_t m_outputIndexs[1]{2}; +}; + +void InnerModelTest::SetLiteGraph(mindspore::lite::LiteGraph* liteGraph) +{ + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_ = m_inputIndices; + liteGraph->output_indices_ = m_outputIndices; + + const std::vector quant_params {}; + + for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) { + const std::vector data(36, 1); + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, m_dimInput, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) { + const std::vector dataOut(36, 1); + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, m_dimOutput, mindspore::lite::FORMAT_NCHW, dataOut, quant_params)); + } +} + +void InnerModelTest::SetTensors() +{ + const int dim[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + + const OH_NN_Tensor& tensorParam = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_ADD_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensorParam)); +} + +void InnerModelTest::SetIndices() +{ + m_params.data = m_paramIndexs; + m_params.size = sizeof(m_paramIndexs) / sizeof(uint32_t); + + m_inputs.data = m_inputIndexs; + m_inputs.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + + m_outputs.data = m_outputIndexs; + m_outputs.size = sizeof(m_outputIndexs) / sizeof(uint32_t); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_001 + * @tc.desc: Verify the input_indices is empty of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_001, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_inputIndices = {}; + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_002 + * @tc.desc: Verify the input_indices is out of bounds of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_002, 
TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_inputIndices = {6}; + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_003 + * @tc.desc: Verify the success of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_003, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_004 + * @tc.desc: Verify the nntensor build failed nullptr return of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_004, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_dimInput = {3, -3}; + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_NULL_PTR, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_005 + * @tc.desc: Verify the output indices out of bounds of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_005, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_outputIndices = {6}; + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + +/** + * @tc.name: inner_model_build_from_lite_graph_001 + * @tc.desc: Verify the litegraph is nullptr of the build_from_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_from_lite_graph_001, TestSize.Level1) +{ + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.BuildFromLiteGraph(nullptr)); +} + +/** + * @tc.name: inner_model_build_from_lite_graph_002 + * @tc.desc: Verify the buildfromlitegraph twice forbidden of the build_from_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_from_lite_graph_002, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.BuildFromLiteGraph(liteGraph)); +} + +/** + * @tc.name: inner_model_build_from_lite_graph_003 + * @tc.desc: Verify the litegraph->alltensors is empty of the build_from_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_from_lite_graph_003, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_ = {0}; + liteGraph->output_indices_ = {1}; + + const int32_t dimInput[2] = {2, 2}; + const 
OH_NN_Tensor& tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + + +/** + * @tc.name: inner_model_add_tensor_001 + * @tc.desc: Verify the success of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_tensor_001, TestSize.Level1) +{ + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); +} + +/** + * @tc.name: inner_model_add_tensor_002 + * @tc.desc: Verify the addtensor after buildfromlitegraph of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_tensor_002, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.AddTensor(tensor)); +} + +/** + * @tc.name: inner_model_add_tensor_003 + * @tc.desc: Verify the buildfromnntensor failed of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_tensor_003, TestSize.Level1) +{ + const int32_t dimInput[2] = {2, -2}; + const OH_NN_Tensor& tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddTensor(tensor)); +} + + +/** + * @tc.name: inner_model_set_tensor_value_001 + * @tc.desc: Verify the success of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_001, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); +} + +/** + * @tc.name: inner_model_set_tensor_value_002 + * @tc.desc: Verify the index out of bounds of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_002, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 6; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); +} + +/** + * @tc.name: inner_model_set_tensor_value_003 + * @tc.desc: Verify the buffer value is nullptr of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_003, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + nullptr, sizeof(activation))); +} + +/** + * @tc.name: inner_model_set_tensor_value_004 + * @tc.desc: Verify the length invalid of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_004, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), 0)); +} + +/** + * @tc.name: inner_model_set_tensor_value_005 + * @tc.desc: Verify the after buildgraph of the set_tensor_value 
function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_005, TestSize.Level1) +{ + uint32_t index = 3; + const int8_t activation = 0; + + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); +} + +/** + * @tc.name: inner_model_set_tensor_value_006 + * @tc.desc: Verify the set value twice of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_006, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); +} + +/** + * @tc.name: inner_model_set_tensor_value_007 + * @tc.desc: Verify the tensor dynamicShape of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_007, TestSize.Level1) +{ + const int32_t dimInput[2] = {2, -1}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dimInput, nullptr, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + const OH_NN_Tensor& tensorParam = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_ADD_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensorParam)); + + uint32_t index = 0; + float x[4] = {0, 1, 2, 3}; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.SetTensorValue(index, + x, sizeof(x)- 1)); +} + +/** + * @tc.name: inner_model_add_operation_001 + * @tc.desc: Verify the success of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_001, TestSize.Level1) +{ + SetIndices(); + + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_002 + * @tc.desc: Verify the after buildgraph of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_002, TestSize.Level1) +{ + OH_NN_OperationType m_opType = OH_NN_OPS_ADD; + OH_NN_UInt32Array m_inputs; + OH_NN_UInt32Array m_outputs; + OH_NN_UInt32Array m_params; + + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, + m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_003 + * @tc.desc: Verify the without set buffer of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_003, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * 
@tc.name: inner_model_add_operation_004 + * @tc.desc: Verify the output indices equal to input indices of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_004, TestSize.Level1) +{ + m_outputIndexs[0] = 0; + + SetIndices(); + SetTensors(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_005 + * @tc.desc: Verify the optype invalid of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_005, TestSize.Level1) +{ + m_opType = OH_NN_OperationType(99); + + SetIndices(); + SetTensors(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_006 + * @tc.desc: Verify the input indices out of bounds of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_006, TestSize.Level1) +{ + m_inputIndexs[1] = 6; + + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_007 + * @tc.desc: Verify the param indices out of bounds of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_007, TestSize.Level1) +{ + m_paramIndexs[0] = 6; + + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_008 + * @tc.desc: Verify the input indices size is 0 of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_008, TestSize.Level1) +{ + SetIndices(); + + m_inputs.size = 0; + m_inputs.data = nullptr; + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_009 + * @tc.desc: Verify the output indices size is 0 of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_009, TestSize.Level1) +{ + SetIndices(); + + m_outputs.size = 0; + m_outputs.data = nullptr; + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_010 + * @tc.desc: Verify the ops build failed of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_010, TestSize.Level1) +{ + SetIndices(); + + const int32_t dimInput1[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dimInput1, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, 
m_innerModelTest.AddTensor(tensor)); + const int32_t dimInput2[2] = {2, 2}; + const OH_NN_Tensor& tensor1 = {OH_NN_FLOAT32, 2, dimInput2, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor1)); + const int32_t dimOutput[2] = {2, 2}; + const OH_NN_Tensor& tensor2 = {OH_NN_FLOAT32, 2, dimOutput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor2)); + const OH_NN_Tensor& tensor3 = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_DIV_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor3)); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_specify_inputs_and_outputs_001 + * @tc.desc: Verify the success of the specify_inputs_and_outputs function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_specify_inputs_and_outputs_001, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + + std::vector> inTensors = m_innerModelTest.GetInputTensors(); + EXPECT_EQ(inTensors.size(), m_inputs.size); + std::vector> outTensors = m_innerModelTest.GetOutputTensors(); + EXPECT_EQ(outTensors.size(), m_outputs.size); +} + +/** + * @tc.name: inner_model_specify_inputs_and_outputs_002 + * @tc.desc: Verify the after buildgraph of the specify_inputs_and_outputs function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_specify_inputs_and_outputs_002, TestSize.Level1) +{ + OH_NN_UInt32Array inputs; + OH_NN_UInt32Array outputs; + inputs.data = m_inputIndexs; + inputs.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + outputs.data = nullptr; + outputs.size = sizeof(m_outputIndexs) / sizeof(uint32_t); + + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.SpecifyInputsAndOutputs(inputs, outputs)); +} + +/** + * @tc.name: inner_model_specify_inputs_and_outputs_003 + * @tc.desc: Verify the output indices is nullptr but length not 0 of the specify_inputs_and_outputs function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_specify_inputs_and_outputs_003, TestSize.Level1) +{ + SetIndices(); + + m_outputs.data = nullptr; + SetTensors(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_specify_inputs_and_outputs_004 + * @tc.desc: Verify the specift twice of the specify_inputs_and_outputs function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_specify_inputs_and_outputs_004, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_001, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, 
m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); + EXPECT_EQ(true, m_innerModelTest.IsBuild()); +} + +/** + * @tc.name: inner_model_build_002 + * @tc.desc: Verify the build twice forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_002, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.Build()); +} + +/** + * @tc.name: inner_model_build_003 + * @tc.desc: Verify the params not match optype of the build function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_003, TestSize.Level1) +{ + OH_NN_OperationType m_opType = OH_NN_OPS_DIV; + + SetIndices(); + + const int dim[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + const OH_NN_Tensor& tensorParam = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_DIV_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensorParam)); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_FAILED, m_innerModelTest.Build()); +} + +/** + * @tc.name: inner_model_build_004 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_004, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); +} + +/** + * @tc.name: inner_model_get_supported_operation_001 + * @tc.desc: Verify the success of the get_supported_operation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_get_supported_operation_001, TestSize.Level1) +{ + const bool *isSupported = nullptr; + uint32_t opCount = 1; + + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + size_t deviceID = 10; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + 
EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.GetSupportedOperations(deviceID, &isSupported, opCount)); +} + +/** + * @tc.name: inner_model_get_supported_operation_002 + * @tc.desc: Verify the mock hdi device result of the get_supported_operation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_get_supported_operation_002, TestSize.Level1) +{ + size_t deviceID = 10; + const bool *isSupported = nullptr; + uint32_t opCount = 1; + + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, m_innerModelTest.GetSupportedOperations(deviceID, &isSupported, opCount)); +} + +/** + * @tc.name: inner_model_get_supported_operation_003 + * @tc.desc: Verify the mock device manager of the get_supported_operation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_get_supported_operation_003, TestSize.Level1) +{ + const bool *isSupported = nullptr; + uint32_t opCount = 1; + + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + size_t deviceID = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); + EXPECT_EQ(OH_NN_FAILED, m_innerModelTest.GetSupportedOperations(deviceID, &isSupported, opCount)); + + std::shared_ptr liteGraph = m_innerModelTest.GetLiteGraphs(); + EXPECT_EQ(liteGraph->name_, "NNR_Model"); +} + +/** + * @tc.name: inner_model_get_supported_operation_004 + * @tc.desc: Verify the before build of the get_supported_operation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_get_supported_operation_004, TestSize.Level1) +{ + size_t deviceID = 10; + const bool *isSupported = nullptr; + uint32_t opCount = 1; + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.GetSupportedOperations(deviceID, &isSupported, opCount)); +} +} // namespace UnitTest +} // namespace NNRT diff --git a/test/unittest/components/inner_model/nn_tensor_test.cpp b/test/unittest/components/inner_model/nn_tensor_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a288c26902bbd8c384f263a39f1bdfae9228e466 --- /dev/null +++ b/test/unittest/components/inner_model/nn_tensor_test.cpp @@ -0,0 +1,525 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include "frameworks/native/validation.h" +#include "frameworks/native/nn_tensor.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Validation; + +namespace NNRT { +namespace UnitTest { +class NnTensorTest : public testing::Test { +}; + +/** + * @tc.name: nn_tensor_parse_dimensions_001 + * @tc.desc: Verify the success of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_dimensions_002 + * @tc.desc: Verify the invalid dimensions of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_002, TestSize.Level1) +{ + OH_NN_Tensor tensor; + tensor.dataType = OH_NN_FLOAT32; + tensor.dimensionCount = 2; + tensor.dimensions = nullptr; + tensor.quantParam = nullptr; + tensor.type = OH_NN_TENSOR; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_dimensions_003 + * @tc.desc: Verify the invalid shape tensor of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_003, TestSize.Level1) +{ + const int dim[2] = {2, -2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_dimensions_004 + * @tc.desc: Verify the dynamic shape of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_004, TestSize.Level1) +{ + const int dim[2] = {2, -1}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_dimensions_005 + * @tc.desc: Verify the dims out of bounds of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_005, TestSize.Level1) +{ + const int dim[3] = {1000000, 1000000, 10000000}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + + +/** + * @tc.name: nn_tensor_parse_quant_params_001 + * @tc.desc: Verify the success of the parse_quant_params function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_001, TestSize.Level1) +{ + const double scale = 1.0; + const int32_t zeroPoint = 0; + const uint32_t numBits = 8; + const OH_NN_QuantParam quantParam = {1, &numBits, &scale, &zeroPoint}; + + NNTensor nnTensor; + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_quant_params_002 + * @tc.desc: Verify the invalid numbits of the parse_quant_params function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_002, TestSize.Level1) +{ + const double scale = 1.0; + const int32_t zeroPoint = 0; + const uint32_t numBits = 16; + const OH_NN_QuantParam quantParam = {1, &numBits, &scale, 
&zeroPoint}; + + NNTensor nnTensor; + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_quant_params_004 + * @tc.desc: Verify the invalid scale of the parse_quant_params function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_004, TestSize.Level1) +{ + const int32_t zeroPoint = 0; + const uint32_t numBits = 8; + const OH_NN_QuantParam quantParam = {1, &numBits, nullptr, &zeroPoint}; + + NNTensor nnTensor; + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_quant_params_005 + * @tc.desc: Verify the invalid zeropoint of the parse_quant_params function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_005, TestSize.Level1) +{ + const double scale = 1.0; + const uint32_t numBits = 8; + const OH_NN_QuantParam quantParam = {1, &numBits, &scale, nullptr}; + + NNTensor nnTensor; + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_set_dimensions_001 + * @tc.desc: Verify the success of the set_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_set_dimensions_001, TestSize.Level1) +{ + const int dim[2] = {2, -1}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + const std::vector dimensions = {2, 3}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.SetDimensions(dimensions)); +} + +/** + * @tc.name: nn_tensor_set_dimensions_002 + * @tc.desc: Verify the dim out of bounds of the set_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_set_dimensions_002, TestSize.Level1) +{ + const int dim[2] = {2, -1}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + const std::vector dimensions = {2, 3, 5}; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.SetDimensions(dimensions)); +} + +/** + * @tc.name: nn_tensor_compare_attribute_001 + * @tc.desc: Verify the success of the CompareAttribute function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + NNTensor expectTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + expectTensor = std::move(nnTensor); + EXPECT_EQ(true, nnTensor.CompareAttribute(nnTensor)); +} + +/** + * @tc.name: nn_tensor_compare_attribute_002 + * @tc.desc: Verify the datatype not equal of the CompareAttribute function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_002, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + NNTensor expectTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + const int dimExpect[2] = {2, 2}; + OH_NN_Tensor tensorExpect{OH_NN_INT32, 2, dimExpect, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, 
expectTensor.BuildFromOHNNTensor(tensorExpect)); + + EXPECT_EQ(false, nnTensor.CompareAttribute(expectTensor)); +} + +/** + * @tc.name: nn_tensor_compare_attribute_003 + * @tc.desc: Verify the dim size not equal of the CompareAttribute function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_003, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + NNTensor expectTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + const int dimExpect[3] = {2, 2, 3}; + OH_NN_Tensor tensorExpect{OH_NN_FLOAT32, 3, dimExpect, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, expectTensor.BuildFromOHNNTensor(tensorExpect)); + + EXPECT_EQ(false, nnTensor.CompareAttribute(expectTensor)); +} + +/** + * @tc.name: nn_tensor_compare_attribute_004 + * @tc.desc: Verify the dim value not equal of the CompareAttribute function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_004, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + NNTensor expectTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + const int dimExpect[2] = {2, 3}; + OH_NN_Tensor tensorExpect{OH_NN_FLOAT32, 2, dimExpect, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, expectTensor.BuildFromOHNNTensor(tensorExpect)); + + EXPECT_EQ(false, nnTensor.CompareAttribute(expectTensor)); +} + +/** + * @tc.name: nn_tensor_is_scalar_001 + * @tc.desc: Verify the success of the is_scalar function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_is_scalar_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + EXPECT_EQ(false, nnTensor.IsScalar()); +} + +/** + * @tc.name: nn_tensor_build_from_tensor_001 + * @tc.desc: Verify the success of the build_from_tensor function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_convert_to_io_tensor_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + int8_t* activationValue = new (std::nothrow) int8_t[1]{0}; + EXPECT_NE(nullptr, activationValue); + + // After SetBuffer, this memory is released by NNTensor + nnTensor.SetBuffer(activationValue, sizeof(int8_t)); + IOTensor ioTensor; + nnTensor.ConvertToIOTensor(ioTensor); + EXPECT_EQ(sizeof(int8_t), ioTensor.length); +} + +/** + * @tc.name: nn_tensor_get_buffer_length_001 + * @tc.desc: Verify the success of the get_buffer_length function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_get_buffer_length_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + int8_t* activationValue = new (std::nothrow) int8_t[1]{0}; + EXPECT_NE(nullptr, activationValue); + + // After SetBuffer, this memory is released by NNTensor + nnTensor.SetBuffer(activationValue, sizeof(int8_t)); + size_t length = sizeof(int8_t); + EXPECT_EQ(length, nnTensor.GetBufferLength()); +} + +/** + * @tc.name: nn_tensor_get_format_001 + * @tc.desc: Verify the success of the get_format function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, 
nn_tensor_get_format_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + OH_NN_Format format = OH_NN_FORMAT_NHWC; + EXPECT_EQ(format, nnTensor.GetFormat()); +} + +/** + * @tc.name: nn_tensor_get_name_001 + * @tc.desc: Verify the success of the get name function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_get_name_001, TestSize.Level1) +{ + NNTensor nnTensor; + const std::string& name = "test"; + nnTensor.SetName(name); + EXPECT_EQ(name, nnTensor.GetName()); +} + +/** + * @tc.name: nn_tensor_get_quant_param_001 + * @tc.desc: Verify the success of the get_quant_param function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_get_quant_param_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + std::vector quantParam = nnTensor.GetQuantParam(); + size_t quantSize = 0; + EXPECT_EQ(quantSize, quantParam.size()); +} + +/** + * @tc.name: nn_tensor_build_from_tensor_002 + * @tc.desc: Verify the invalid datatype value of the build_from_tensor function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_from_tensor_002, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + + int dataTypeTest = 13; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + OH_NN_Tensor tensor{dataType, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_convert_to_lite_graph_tensor_001 + * @tc.desc: Verify the success of the convert_to_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_convert_to_lite_graph_tensor_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + LiteGraphTensorPtr tensorPtr = {nullptr, DestroyLiteGraphTensor}; + EXPECT_NE(tensorPtr, nnTensor.ConvertToLiteGraphTensor()); +} + +/** + * @tc.name: nn_tensor_convert_to_lite_graph_tensor_002 + * @tc.desc: Verify the success with quant of the convert_to_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_convert_to_lite_graph_tensor_002, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + + OH_NN_Tensor tensor; + tensor.dataType = OH_NN_FLOAT32; + tensor.dimensionCount = 2; + tensor.dimensions = dim; + const double scale = 1.0; + const int32_t zeroPoint = 0; + const uint32_t numBits = 8; + const OH_NN_QuantParam quantParam = {1, &numBits, &scale, &zeroPoint}; + tensor.quantParam = &quantParam; + tensor.type = OH_NN_TENSOR; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + LiteGraphTensorPtr tensorPtr = {nullptr, DestroyLiteGraphTensor}; + EXPECT_NE(tensorPtr, nnTensor.ConvertToLiteGraphTensor()); +} + +/** + * @tc.name: nn_tensor_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_001, TestSize.Level1) +{ + OH_NN_DataType dataType = OH_NN_FLOAT32; + const std::vector dimensions = {2, 2}; + const std::vector quantParam = {{8, 1.0, 0}, {8, 1.0, 0}, {8, 1.0, 0}}; + OH_NN_TensorType type = OH_NN_ADD_ACTIVATIONTYPE; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, 
nnTensor.Build(dataType, dimensions, quantParam, type)); +} + +/** + * @tc.name: nn_tensor_build_002 + * @tc.desc: Verify the invalid datatype value of the build function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_002, TestSize.Level1) +{ + int dataTypeTest = 13; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + const std::vector dimensions = {2, 2}; + const std::vector quantParam = {{8, 1.0, 0}, {8, 1.0, 0}, {8, 1.0, 0}}; + OH_NN_TensorType type = OH_NN_ADD_ACTIVATIONTYPE; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.Build(dataType, dimensions, quantParam, type)); +} + +/** + * @tc.name: nn_tensor_build_003 + * @tc.desc: Verify the dynamic shape of the build function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_003, TestSize.Level1) +{ + OH_NN_DataType dataType = OH_NN_FLOAT32; + const std::vector dimensions = {2, -2}; + const std::vector quantParam = {{8, 1.0, 0}, {8, 1.0, 0}, {8, 1.0, 0}}; + OH_NN_TensorType type = OH_NN_ADD_ACTIVATIONTYPE; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.Build(dataType, dimensions, quantParam, type)); +} + +/** + * @tc.name: nn_tensor_build_004 + * @tc.desc: Verify the invalid numbits of the build function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_004, TestSize.Level1) +{ + OH_NN_DataType dataType = OH_NN_FLOAT32; + const std::vector dimensions = {2, 2}; + const std::vector quantParam = {{2, 1.0, 0}, {2, 1.0, 0}, {2, 1.0, 0}}; + OH_NN_TensorType type = OH_NN_ADD_ACTIVATIONTYPE; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.Build(dataType, dimensions, quantParam, type)); +} +} // namespace UnitTest +} // namespace NNRT diff --git a/test/unittest/components/inner_model/nn_validation_test.cpp b/test/unittest/components/inner_model/nn_validation_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..49a2e81e465aa0582e6df75634466b02522bfbd2 --- /dev/null +++ b/test/unittest/components/inner_model/nn_validation_test.cpp @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include "frameworks/native/validation.h" +#include "frameworks/native/nn_tensor.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Validation; + +namespace NNRT { +namespace UnitTest { +class NnValidationTest : public testing::Test { +}; + +/** + * @tc.name: nn_validation_validate_tensor_datatype_001 + * @tc.desc: Verify the success of the validate_tensor_datatype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_tensor_datatype_001, TestSize.Level1) +{ + int dataTypeTest = 12; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + EXPECT_EQ(true, ValidateTensorDataType(dataType)); +} + +/** + * @tc.name: nn_validation_validate_tensor_datatype_002 + * @tc.desc: Verify the gt bounds of the validate_tensor_datatype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_tensor_datatype_002, TestSize.Level1) +{ + int dataTypeTest = 13; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + EXPECT_EQ(false, ValidateTensorDataType(dataType)); +} + +/** + * @tc.name: nn_validation_validate_tensor_datatype_003 + * @tc.desc: Verify the lt bounds of the validate_tensor_datatype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_tensor_datatype_003, TestSize.Level1) +{ + int dataTypeTest = -1; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + EXPECT_EQ(false, ValidateTensorDataType(dataType)); +} + +/** + * @tc.name: nn_validation_validate_preformance_mode_001 + * @tc.desc: Verify the success of the validate_preformance_mode function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_preformance_mode_001, TestSize.Level1) +{ + int performanceModeTest = 4; + OH_NN_PerformanceMode performanceMode = (OH_NN_PerformanceMode)performanceModeTest; + EXPECT_EQ(true, ValidatePerformanceMode(performanceMode)); +} + +/** + * @tc.name: nn_validation_validate_preformance_mode_002 + * @tc.desc: Verify the gt bounds of the validate_preformance_mode function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_preformance_mode_002, TestSize.Level1) +{ + int performanceModeTest = 5; + OH_NN_PerformanceMode performanceMode = (OH_NN_PerformanceMode)performanceModeTest; + EXPECT_EQ(false, ValidatePerformanceMode(performanceMode)); +} + +/** + * @tc.name: nn_validation_validate_preformance_mode_003 + * @tc.desc: Verify the lt bounds of the validate_preformance_mode function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_preformance_mode_003, TestSize.Level1) +{ + int performanceModeTest = -1; + OH_NN_PerformanceMode performanceMode = (OH_NN_PerformanceMode)performanceModeTest; + EXPECT_EQ(false, ValidatePerformanceMode(performanceMode)); +} + +/** + * @tc.name: nn_validation_validate_priority_001 + * @tc.desc: Verify the success of the validate_priority function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_priority_001, TestSize.Level1) +{ + int priorityTest = 2; + OH_NN_Priority priority = (OH_NN_Priority)priorityTest; + EXPECT_EQ(true, ValidatePriority(priority)); +} + +/** + * @tc.name: nn_validation_validate_priority_002 + * @tc.desc: Verify the gt bounds of the validate_priority function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_priority_002, TestSize.Level1) +{ + int priorityTest = 4; + OH_NN_Priority priority = (OH_NN_Priority)priorityTest; + 
EXPECT_EQ(false, ValidatePriority(priority)); +} + +/** + * @tc.name: nn_validation_validate_priority_003 + * @tc.desc: Verify the lt bounds of the validate_priority function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_priority_003, TestSize.Level1) +{ + int priorityTest = -1; + OH_NN_Priority priority = (OH_NN_Priority)priorityTest; + EXPECT_EQ(false, ValidatePriority(priority)); +} + +/** + * @tc.name: nn_validation_fusetype_001 + * @tc.desc: Verify the success of the validate_fusetype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_fusetype_001, TestSize.Level1) +{ + int fuseTypeTest = 2; + OH_NN_FuseType fuseType = (OH_NN_FuseType)fuseTypeTest; + EXPECT_EQ(true, ValidateFuseType(fuseType)); +} + +/** + * @tc.name: nn_validation_fusetype_002 + * @tc.desc: Verify the gt bounds of the validate_fusetype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_fusetype_002, TestSize.Level1) +{ + int fuseTypeTest = 3; + OH_NN_FuseType fuseType = (OH_NN_FuseType)fuseTypeTest; + EXPECT_EQ(false, ValidateFuseType(fuseType)); +} + +/** + * @tc.name: nn_validation_fusetype_003 + * @tc.desc: Verify the lt bounds of the validate_fusetype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_fusetype_003, TestSize.Level1) +{ + int fuseTypeTest = -1; + OH_NN_FuseType fuseType = (OH_NN_FuseType)fuseTypeTest; + EXPECT_EQ(false, ValidateFuseType(fuseType)); +} +} // namespace UnitTest +} // namespace NNRT diff --git a/test/unittest/components/inner_model/ops_regitstry_test.cpp b/test/unittest/components/inner_model/ops_regitstry_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..de3cc84846f07e6cdc33067e5b09754de8a5e998 --- /dev/null +++ b/test/unittest/components/inner_model/ops_regitstry_test.cpp @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <gtest/gtest.h> + +#include "frameworks/native/validation.h" +#include "frameworks/native/ops_registry.h" +#include "frameworks/native/ops/add_builder.h" +#include "frameworks/native/ops/div_builder.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Validation; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace NNRT { +namespace UnitTest { +class OpsRegistryTest : public testing::Test { +}; + +/** + * @tc.name: registry_001 + * @tc.desc: Verify the registration success of the registrar function + * @tc.type: FUNC + */ +HWTEST_F(OpsRegistryTest, registry_001, TestSize.Level1) +{ + const int newRegistryOperationType = 100; + REGISTER_OPS(AddBuilder, OH_NN_OperationType(newRegistryOperationType)); + + OpsRegistry& opsregistry = OpsRegistry::GetSingleton(); + EXPECT_NE(nullptr, opsregistry.GetOpsBuilder(OH_NN_OperationType(newRegistryOperationType))); +} + +/** + * @tc.name: registry_002 + * @tc.desc: Verify the repeated registration of the registrar function + * @tc.type: FUNC + */ +HWTEST_F(OpsRegistryTest, registry_002, TestSize.Level1) +{ + const int newRegistryOperationType = 1000; + REGISTER_OPS(AddBuilder, OH_NN_OperationType(newRegistryOperationType)); + + OpsRegistry& opsregistry = OpsRegistry::GetSingleton(); + EXPECT_NE(nullptr, opsregistry.GetOpsBuilder(OH_NN_OperationType(newRegistryOperationType))); + + REGISTER_OPS(DivBuilder, OH_NN_OperationType(newRegistryOperationType)); +} +} // namespace UnitTest +} // namespace NNRT diff --git a/test/unittest/components/memory_manager/memory_manager_test.cpp b/test/unittest/components/memory_manager/memory_manager_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..eba193dd3a78bd69b28f0673be0aa303fee23aef --- /dev/null +++ b/test/unittest/components/memory_manager/memory_manager_test.cpp @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include +#include +#include +#include + +#include + +#include "interfaces/oem/cpp_api/cpp_type.h" +#include "frameworks/native/memory_manager.h" +#include "test/unittest/common/file_utils.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class MemoryManagerTest : public testing::Test { +public: + MemoryManagerTest() = default; + ~MemoryManagerTest() = default; +}; + +/** + * @tc.name: memorymanagertest_mapmemory_001 + * @tc.desc: Verify the MapMemory function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(MemoryManagerTest, memorymanagertest_mapmemory_001, TestSize.Level0) +{ + const auto& memoryManager = MemoryManager::GetInstance(); + int fd = -1; + size_t length = 0; + void* result = memoryManager->MapMemory(fd, length); + EXPECT_EQ(nullptr, result); +} + +/** + * @tc.name: memorymanagertest_mapmemory_002 + * @tc.desc: Verify the MapMemory function return nullptr in case of length 0. + * @tc.type: FUNC + */ +HWTEST_F(MemoryManagerTest, memorymanagertest_mapmemory_002, TestSize.Level0) +{ + const auto& memoryManager = MemoryManager::GetInstance(); + int fd = 0; + size_t length = 0; + void* result = memoryManager->MapMemory(fd, length); + EXPECT_EQ(nullptr, result); +} + +/** + * @tc.name: memorymanagertest_mapmemory_003 + * @tc.desc: Verify the MapMemory function return nullptr in case of fd 0. + * @tc.type: FUNC + */ +HWTEST_F(MemoryManagerTest, memorymanagertest_mapmemory_003, TestSize.Level0) +{ + const auto& memoryManager = MemoryManager::GetInstance(); + int fd = 0; + size_t length = 1; + void* result = memoryManager->MapMemory(fd, length); + EXPECT_EQ(nullptr, result); +} + +/** + * @tc.name: memorymanagertest_mapmemory_004 + * @tc.desc: Verify the MapMemory function validate mapmemory content success. + * @tc.type: FUNC + */ +HWTEST_F(MemoryManagerTest, memorymanagertest_mapmemory_004, TestSize.Level0) +{ + std::string data = "ABCD"; + const size_t dataLength = 100; + data.resize(dataLength, '*'); + + std::string filename = "/data/log/memory-001.dat"; + FileUtils fileUtils(filename); + fileUtils.WriteFile(data); + + int fd = open(filename.c_str(), O_RDWR); + EXPECT_NE(-1, fd); + + size_t length = 4; + const auto& memoryManager = MemoryManager::GetInstance(); + char* result = static_cast(memoryManager->MapMemory(fd, length)); + EXPECT_NE(nullptr, result); + EXPECT_EQ('A', static_cast(result[0])); + EXPECT_EQ('B', static_cast(result[1])); + EXPECT_EQ('C', static_cast(result[2])); + EXPECT_EQ('D', static_cast(result[3])); + memoryManager->UnMapMemory(result); + close(fd); +} + +/** + * @tc.name: memorymanagertest_unmapmemory_001 + * @tc.desc: Verify the UnMapMemory function validate behavior. + * @tc.type: FUNC + */ +HWTEST_F(MemoryManagerTest, memorymanagertest_unmapmemory_001, TestSize.Level0) +{ + const auto& memoryManager = MemoryManager::GetInstance(); + void* memory = nullptr; + memoryManager->UnMapMemory(memory); +} + +/** + * @tc.name: memorymanagertest_unmapmemory_002 + * @tc.desc: Verify the UnMapMemory function validate behavior + * @tc.type: FUNC + */ +HWTEST_F(MemoryManagerTest, memorymanagertest_unmapmemory_002, TestSize.Level0) +{ + const auto& memoryManager = MemoryManager::GetInstance(); + void* memory = malloc(10); + memoryManager->UnMapMemory(memory); + free(memory); +} + +/** + * @tc.name: memorymanagertest_unmapmemory_003 + * @tc.desc: Verify the UnMapMemory function pairwise behavior. 
+ * @tc.type: FUNC + */ +HWTEST_F(MemoryManagerTest, memorymanagertest_unmapmemory_003, TestSize.Level0) +{ + std::string data = "ABCD"; + const size_t dataLength = 100; + data.resize(dataLength, '/'); + + std::string filename = "/data/log/memory-001.dat"; + FileUtils fileUtils(filename); + fileUtils.WriteFile(data); + + int fd = 0; + fd = open(filename.c_str(), O_RDWR); + EXPECT_NE(-1, fd); + + size_t length = 10; + const auto& memoryManager = MemoryManager::GetInstance(); + void* buffer = memoryManager->MapMemory(fd, length); + memoryManager->UnMapMemory(buffer); + close(fd); +} + +/** + * @tc.name: memorymanagertest_getmemory_001 + * @tc.desc: Verify the GetMemory function return nullptr. + * @tc.type: FUNC + */ +HWTEST_F(MemoryManagerTest, memorymanagertest_getmemory_001, TestSize.Level0) +{ + const auto& memoryManager = MemoryManager::GetInstance(); + void* buffer = nullptr; + Memory memory; + OH_NN_ReturnCode result = memoryManager->GetMemory(buffer, memory); + EXPECT_EQ(OH_NN_NULL_PTR, result); +} + +/** + * @tc.name: memorymanagertest_getmemory_002 + * @tc.desc: Verify the GetMemory function return invalid parameter. + * @tc.type: FUNC + */ +HWTEST_F(MemoryManagerTest, memorymanagertest_getmemory_002, TestSize.Level0) +{ + const auto& memoryManager = MemoryManager::GetInstance(); + void* buffer = malloc(10); + Memory memory; + OH_NN_ReturnCode result = memoryManager->GetMemory(buffer, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); + free(buffer); +} + +/** + * @tc.name: memorymanagertest_getmemory_003 + * @tc.desc: Verify the GetMemory function validate memory content success. + * @tc.type: FUNC + */ +HWTEST_F(MemoryManagerTest, memorymanagertest_getmemory_003, TestSize.Level0) +{ + std::string data = "ABCD"; + const size_t dataLength = 100; + data.resize(dataLength, '%'); + + std::string filename = "/data/log/memory-001.dat"; + FileUtils fileUtils(filename); + fileUtils.WriteFile(data); + + int fd = 0; + fd = open(filename.c_str(), O_RDWR); + EXPECT_NE(-1, fd); + + size_t length = 4; + const auto& memoryManager = MemoryManager::GetInstance(); + void* buffer = memoryManager->MapMemory(fd, length); + close(fd); + + Memory memory; + OH_NN_ReturnCode result = memoryManager->GetMemory(buffer, memory); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_NE(nullptr, memory.data); + + const char* tmpData = static_cast(memory.data); + EXPECT_EQ('A', static_cast(tmpData[0])); + EXPECT_EQ('B', static_cast(tmpData[1])); + EXPECT_EQ('C', static_cast(tmpData[2])); + EXPECT_EQ('D', static_cast(tmpData[3])); + memoryManager->UnMapMemory(buffer); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.cpp b/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..404f2e80d118d23b3478d60fe8de3fcf551fadc1 --- /dev/null +++ b/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.cpp @@ -0,0 +1,2221 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "neural_network_runtime_test.h" + +#include "mindir.h" + +#include "common/utils.h" +#include "frameworks/native/compilation.h" +#include "frameworks/native/device_manager.h" +#include "frameworks/native/hdi_device.h" +#include "test/unittest/common/mock_idevice.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + if (model == nullptr) { + return OH_NN_INVALID_PARAMETER; + } + + if (config.enableFloat16 == false) { + return OH_NN_FAILED; + } + + sptr iPreparedModel = sptr(new OHOS::HDI::Nnrt::V1_0::MockIPreparedModel()); + if (iPreparedModel == nullptr) { + LOGE("HDIDevice mock PrepareModel failed, error happened when new sptr"); + return OH_NN_NULL_PTR; + } + + preparedModel = CreateSharedPtr(iPreparedModel); + return OH_NN_SUCCESS; +} + +std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const +{ + sptr idevice + = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice()); + if (idevice == nullptr) { + LOGE("DeviceManager mock GetDevice failed, error happened when new sptr"); + return nullptr; + } + + std::shared_ptr device = CreateSharedPtr(idevice); + if (device == nullptr) { + LOGE("DeviceManager mock GetDevice failed, the device is nullptr"); + return nullptr; + } + + if (deviceId == 0) { + LOGE("DeviceManager mock GetDevice failed, the passed parameter deviceId is 0"); + return nullptr; + } else { + return device; + } +} + +OH_NN_ReturnCode HDIDevice::GetDeviceType(OH_NN_DeviceType& deviceType) +{ + if (deviceType == OH_NN_OTHERS) { + return OH_NN_UNAVALIDABLE_DEVICE; + } + + return OH_NN_SUCCESS; +} + +const std::string& DeviceManager::GetDeviceName(size_t deviceId) +{ + static std::string deviceName = ""; + if (deviceId == 0) { + return deviceName; + } + + deviceName = "deviceId"; + return deviceName; +} + +const std::vector& DeviceManager::GetAllDeviceId() +{ + static std::vector deviceIds; + if (OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + // In order not to affect other use cases, set to the OH_NN_OPERATION_FORBIDDEN + OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return deviceIds; + } + std::size_t device = 1; + deviceIds.emplace_back(device); + return deviceIds; +} + +OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) +{ + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsPerformanceModeSupported(bool& isSupported) +{ + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsPrioritySupported(bool& isSupported) +{ + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsFloat16PrecisionSupported(bool& isSupported) +{ + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr model, + std::vector& ops) +{ + if (model == nullptr) { + LOGE("HDIDevice mock GetSupportedOperation failed, Model is nullptr, cannot query supported operation."); + return OH_NN_NULL_PTR; + } + 
+ ops.emplace_back(true); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsDynamicInputSupported(bool& isSupported) +{ + isSupported = true; + return OH_NN_SUCCESS; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Unittest { +OH_NN_ReturnCode NeuralNetworkRuntimeTest::BuildModelGraph(InnerModel& innerModel) +{ + // liteGraph is released internally by innerModel + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph; + EXPECT_NE(nullptr, liteGraph); + + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_ = {0}; + liteGraph->output_indices_ = {1}; + liteGraph->all_tensors_ = {nullptr}; + const std::vector data(36, 1); + const std::vector dim = {3, 3}; + const std::vector quant_params {}; + + for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) { + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) { + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + return innerModel.BuildFromLiteGraph(liteGraph); +} + +void NeuralNetworkRuntimeTest::InitIndices() +{ + m_inputIndices.data = m_inputIndexs; + m_inputIndices.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + + m_outputIndices.data = m_outputIndexs; + m_outputIndices.size = sizeof(m_outputIndexs) / sizeof(uint32_t); + + m_paramIndices.data = m_paramIndexs; + m_paramIndices.size = sizeof(m_paramIndexs) / sizeof(uint32_t); +} + +void NeuralNetworkRuntimeTest::AddModelTensor(InnerModel& innerModel) +{ + const int dim[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddTensor(tensor)); + + const OH_NN_Tensor& tensorParam = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_ADD_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddTensor(tensorParam)); +} + +void NeuralNetworkRuntimeTest::SetTensor() +{ + m_tensor.dataType = OH_NN_INT32; + m_tensor.dimensionCount = 0; + m_tensor.dimensions = nullptr; + m_tensor.quantParam = nullptr; + m_tensor.type = OH_NN_TENSOR; +} + +void NeuralNetworkRuntimeTest::SetInnerBuild(InnerModel& innerModel) +{ + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddOperation(opType, m_paramIndices, m_inputIndices, m_outputIndices)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SpecifyInputsAndOutputs(m_inputIndices, m_outputIndices)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.Build()); +} + +void NeuralNetworkRuntimeTest::SetInputAndOutput(Executor& executor) +{ + size_t length = 9 * sizeof(int32_t); + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void *buffer = input; + uint32_t index = 0; + + SetTensor(); + EXPECT_EQ(OH_NN_SUCCESS, executor.SetInput(index, m_tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executor.SetOutput(index, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executor.Run()); +} + +/* + * 
@tc.name: model_construct_001 + * @tc.desc: Verify the return model of the OH_NNModel_Construct function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_construct_001, testing::ext::TestSize.Level0) +{ + OH_NNModel* ret = OH_NNModel_Construct(); + EXPECT_NE(nullptr, ret); +} + +/* + * @tc.name: model_add_tensor_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_Tensor function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_tensor_001, testing::ext::TestSize.Level0) +{ + OH_NNModel* model = nullptr; + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, &tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_tensor_002 + * @tc.desc: Verify the OH_NN_Tensor is nullptr of the OH_NNModel_AddTensor function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_tensor_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_Tensor* tensor = nullptr; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_tensor_003 + * @tc.desc: Verify the success of the OH_NNModel_AddTensor function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_tensor_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, &tensor); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_add_operation_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_AddOperation function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + InitIndices(); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, &m_paramIndices, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_operation_002 + * @tc.desc: Verify the paramIndices is nullptr of the OH_NNModel_AddOperation function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + m_inputIndices.data = m_inputIndexs; + m_inputIndices.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + + m_outputIndices.data = m_outputIndexs; + m_outputIndices.size = sizeof(m_outputIndexs) / sizeof(uint32_t); + + AddModelTensor(innerModel); + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, nullptr, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_operation_003 + * @tc.desc: Verify the inputIndices is nullptr of the OH_NNModel_AddOperation function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + m_paramIndices.data = m_paramIndexs; + m_paramIndices.size = sizeof(m_paramIndexs) / sizeof(uint32_t); + + m_outputIndices.data = m_outputIndexs; + m_outputIndices.size = sizeof(m_outputIndexs) / sizeof(uint32_t); + + AddModelTensor(innerModel); + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, &m_paramIndices, nullptr, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_operation_004 + * @tc.desc: Verify the outputIndices is nullptr of the OH_NNModel_AddOperation function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + m_paramIndices.data = m_paramIndexs; + m_paramIndices.size = sizeof(m_paramIndexs) / sizeof(uint32_t); + + m_inputIndices.data = m_inputIndexs; + m_inputIndices.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + + AddModelTensor(innerModel); + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, &m_paramIndices, &m_inputIndices, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_operation_005 + * @tc.desc: Verify the success of the OH_NNModel_AddOperation function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + InitIndices(); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, &m_paramIndices, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_set_tensor_data_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_SetTensorData function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_set_tensor_data_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + + OH_NN_ReturnCode ret = OH_NNModel_SetTensorData(model, index, static_cast(&activation), + sizeof(int8_t)); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_set_tensor_data_002 + * @tc.desc: Verify the data is nullptr of the OH_NNModel_SetTensorData function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_set_tensor_data_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + AddModelTensor(innerModel); + + uint32_t index = 3; + + OH_NN_ReturnCode ret = OH_NNModel_SetTensorData(model, index, nullptr, sizeof(int8_t)); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_set_tensor_data_003 + * @tc.desc: Verify the length is 0 of the OH_NNModel_SetTensorData function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_set_tensor_data_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + + OH_NN_ReturnCode ret = OH_NNModel_SetTensorData(model, index, static_cast(&activation), 0); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_set_tensor_data_004 + * @tc.desc: Verify the successs of the OH_NNModel_SetTensorData function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_set_tensor_data_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + + OH_NN_ReturnCode ret = OH_NNModel_SetTensorData(model, index, static_cast(&activation), + sizeof(int8_t)); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_specify_inputs_and_outputs_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_SpecifyInputsAndOutputs function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_specify_inputs_and_outputs_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + + InitIndices(); + AddModelTensor(innerModel); + + OH_NN_ReturnCode ret = OH_NNModel_SpecifyInputsAndOutputs(model, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_specify_inputs_and_outputs_002 + * @tc.desc: Verify the inputIndices is nullptr of the OH_NNModel_SpecifyInputsAndOutputs function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_specify_inputs_and_outputs_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + InitIndices(); + AddModelTensor(innerModel); + + OH_NN_ReturnCode ret = OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_specify_inputs_and_outputs_003 + * @tc.desc: Verify the outputIndices is nullptr of the OH_NNModel_SpecifyInputsAndOutputs function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_specify_inputs_and_outputs_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + InitIndices(); + AddModelTensor(innerModel); + + OH_NN_ReturnCode ret = OH_NNModel_SpecifyInputsAndOutputs(model, &m_inputIndices, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_specify_inputs_and_outputs_004 + * @tc.desc: Verify the success of the OH_NNModel_SpecifyInputsAndOutputs function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_specify_inputs_and_outputs_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + InitIndices(); + AddModelTensor(innerModel); + + OH_NN_ReturnCode ret = OH_NNModel_SpecifyInputsAndOutputs(model, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_finish_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_Finish function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_finish_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + InitIndices(); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, static_cast(&activation), + sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddOperation(opType, m_paramIndices, m_inputIndices, m_outputIndices)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SpecifyInputsAndOutputs(m_inputIndices, m_outputIndices)); + + OH_NN_ReturnCode ret = OH_NNModel_Finish(model); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_finish_002 + * @tc.desc: Verify the success of the OH_NNModel_Finish function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_finish_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + InitIndices(); + AddModelTensor(innerModel); + + const int8_t activation = 0; + uint32_t index = 3; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddOperation(opType, m_paramIndices, m_inputIndices, m_outputIndices)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SpecifyInputsAndOutputs(m_inputIndices, m_outputIndices)); + + OH_NN_ReturnCode ret = OH_NNModel_Finish(model); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_destroy_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_destroy_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel** pModel = nullptr; + OH_NNModel_Destroy(pModel); + EXPECT_EQ(nullptr, pModel); +} + +/* + * @tc.name: model_destroy_002 + * @tc.desc: Verify the *OH_NNModel is nullptr of the OH_NNModel_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_destroy_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + OH_NNModel** pModel = &model; + OH_NNModel_Destroy(pModel); + EXPECT_EQ(nullptr, model); +} + +/* + * @tc.name: model_destroy_003 + * @tc.desc: Verify the normal model of the OH_NNModel_Destroy function. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, model_destroy_003, testing::ext::TestSize.Level0)
+{
+    InnerModel* innerModel = new InnerModel();
+    EXPECT_NE(nullptr, innerModel);
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(innerModel);
+    OH_NNModel_Destroy(&model);
+    EXPECT_EQ(nullptr, model);
+}
+
+/*
+ * @tc.name: model_get_available_operation_001
+ * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_GetAvailableOperations function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_001, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    OH_NNModel* model = nullptr;
+
+    uint32_t opCount = 1;
+    const bool *pIsAvailable = nullptr;
+
+    InitIndices();
+    AddModelTensor(innerModel);
+    SetInnerBuild(innerModel);
+
+    size_t deviceID = 10;
+    OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, &pIsAvailable, &opCount);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: model_get_available_operation_002
+ * @tc.desc: Verify the isAvailable is nullptr of the OH_NNModel_GetAvailableOperations function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
+
+    uint32_t opCount = 1;
+    InitIndices();
+    AddModelTensor(innerModel);
+    SetInnerBuild(innerModel);
+
+    size_t deviceID = 10;
+    OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, nullptr, &opCount);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: model_get_available_operation_003
+ * @tc.desc: Verify the *isAvailable is not nullptr of the OH_NNModel_GetAvailableOperations function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_003, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
+
+    const bool isAvailable = true;
+    const bool *pIsAvailable = &isAvailable;
+    uint32_t opCount = 1;
+
+    InitIndices();
+    AddModelTensor(innerModel);
+    SetInnerBuild(innerModel);
+
+    size_t deviceID = 10;
+    OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, &pIsAvailable, &opCount);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: model_get_available_operation_004
+ * @tc.desc: Verify the opCount is nullptr of the OH_NNModel_GetAvailableOperations function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_004, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
+
+    const bool *pIsAvailable = nullptr;
+    uint32_t* opCount = nullptr;
+
+    InitIndices();
+    AddModelTensor(innerModel);
+    SetInnerBuild(innerModel);
+
+    size_t deviceID = 10;
+    OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, &pIsAvailable, opCount);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: model_get_available_operation_005
+ * @tc.desc: Verify the success of the OH_NNModel_GetAvailableOperations function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_005, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
+
+    const bool *pIsAvailable = nullptr;
+    uint32_t opCount = 1;
+
+    InitIndices();
+    AddModelTensor(innerModel);
+    SetInnerBuild(innerModel);
+
+    size_t deviceID = 10;
+    OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, &pIsAvailable, &opCount);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/*
+ * @tc.name: compilation_construct_001
+ * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNCompilation_Construct function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, compilation_construct_001, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    const OH_NNModel* model = nullptr;
+    OH_NNCompilation* ret = OH_NNCompilation_Construct(model);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/*
+ * @tc.name: compilation_construct_002
+ * @tc.desc: Verify that OH_NNCompilation_Construct returns nullptr when the model has not been built before creating the compilation.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, compilation_construct_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
+    OH_NNCompilation* ret = OH_NNCompilation_Construct(model);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/*
+ * @tc.name: compilation_construct_003
+ * @tc.desc: Verify the normal model of the OH_NNCompilation_Construct function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, compilation_construct_003, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
+    OH_NNCompilation* ret = OH_NNCompilation_Construct(model);
+    EXPECT_NE(nullptr, ret);
+}
+
+/*
+ * @tc.name: compilation_set_device_001
+ * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_SetDevice function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_device_001, testing::ext::TestSize.Level0)
+{
+    OH_NNCompilation* compilation = nullptr;
+    size_t deviceId = 1;
+    OH_NN_ReturnCode ret = OH_NNCompilation_SetDevice(compilation, deviceId);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: compilation_set_device_002
+ * @tc.desc: Verify the success of the OH_NNCompilation_SetDevice function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_device_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation compilation(&innerModel);
+    OH_NNCompilation* nnCompilation = reinterpret_cast<OH_NNCompilation*>(&compilation);
+    size_t deviceId = 1;
+    OH_NN_ReturnCode ret = OH_NNCompilation_SetDevice(nnCompilation, deviceId);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
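+/*
+ * Usage sketch (documentation only, not an executable test case): the compilation cases in this
+ * file exercise the typical OH_NNCompilation flow. Assuming a model whose graph has already been
+ * built (as BuildModelGraph does for these tests) and a device id such as the one used below,
+ * the flow is roughly:
+ *
+ *     OH_NNCompilation* compilation = OH_NNCompilation_Construct(model);
+ *     OH_NNCompilation_SetDevice(compilation, deviceId);
+ *     OH_NNCompilation_SetCache(compilation, "../", 1);
+ *     OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_EXTREME);
+ *     OH_NNCompilation_SetPriority(compilation, OH_NN_PRIORITY_HIGH);
+ *     OH_NNCompilation_EnableFloat16(compilation, true);
+ *     OH_NN_ReturnCode ret = OH_NNCompilation_Build(compilation);
+ *
+ * OH_NNCompilation_Construct returns nullptr for a null or unbuilt model, and every setter as
+ * well as Build returns OH_NN_INVALID_PARAMETER for a null compilation handle, as the
+ * surrounding cases assert.
+ */
+
+/*
+ * @tc.name: compilation_set_cache_001
+ * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_SetCache function.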
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_cache_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + const char* cacheDir = "../"; + uint32_t version = 1; + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + OH_NN_ReturnCode ret = OH_NNCompilation_SetCache(nnCompilation, cacheDir, version); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_cache_002 + * @tc.desc: Verify the cachePath is nullptr of the OH_NNCompilation_SetCache function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_cache_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + const char* cacheDir = nullptr; + uint32_t version = 1; + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + OH_NN_ReturnCode ret = OH_NNCompilation_SetCache(nnCompilation, cacheDir, version); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_cache_003 + * @tc.desc: Verify the success of the OH_NNCompilation_SetCache function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_cache_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + const char* cacheDir = "../"; + uint32_t version = 1; + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + OH_NN_ReturnCode ret = OH_NNCompilation_SetCache(nnCompilation, cacheDir, version); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_performance_mode_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_SetPerformanceMode function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_performance_mode_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + OH_NN_PerformanceMode performanceMode = OH_NN_PERFORMANCE_NONE; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + OH_NN_ReturnCode ret = OH_NNCompilation_SetPerformanceMode(nnCompilation, performanceMode); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_performance_mode_002 + * @tc.desc: Verify the success of the OH_NNCompilation_SetPerformanceMode function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_performance_mode_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + OH_NN_PerformanceMode performanceMode = OH_NN_PERFORMANCE_NONE; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_SetPerformanceMode(nnCompilation, performanceMode); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_priority_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_priority_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + OH_NN_Priority priority = OH_NN_PRIORITY_LOW; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_SetPriority(nnCompilation, priority); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_priority_002 + * @tc.desc: Verify the success of the OH_NNCompilation_SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_priority_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + OH_NN_Priority priority = OH_NN_PRIORITY_LOW; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_SetPriority(nnCompilation, priority); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_enable_float16_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_EnableFloat16 function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_enable_float16_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + bool enableFloat16 = true; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_EnableFloat16(nnCompilation, enableFloat16); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_enable_float16_002 + * @tc.desc: Verify the success of the OH_NNCompilation_EnableFloat16 function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_enable_float16_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + bool enableFloat16 = true; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_EnableFloat16(nnCompilation, enableFloat16); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_build_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_Build function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_build_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPerformance(OH_NN_PERFORMANCE_EXTREME)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPriority(OH_NN_PRIORITY_HIGH)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetEnableFp16(true)); + + OH_NN_ReturnCode ret = OH_NNCompilation_Build(nnCompilation); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_002 + * @tc.desc: Verify the success of the OH_NNCompilation_Build function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_build_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPerformance(OH_NN_PERFORMANCE_EXTREME)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPriority(OH_NN_PRIORITY_HIGH)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetEnableFp16(true)); + + OH_NN_ReturnCode ret = OH_NNCompilation_Build(nnCompilation); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_destroy_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_destroy_001, testing::ext::TestSize.Level0) +{ + OH_NNCompilation** pCompilation = nullptr; + OH_NNCompilation_Destroy(pCompilation); + EXPECT_EQ(nullptr, pCompilation); +} + +/* + * @tc.name: compilation_destroy_002 + * @tc.desc: Verify the *OH_NNCompilation is nullptr of the OH_NNCompilation_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_destroy_002, testing::ext::TestSize.Level0) +{ + OH_NNCompilation* compilation = nullptr; + OH_NNCompilation** pCompilation = &compilation; + OH_NNCompilation_Destroy(pCompilation); + EXPECT_EQ(nullptr, compilation); +} + +/* + * @tc.name: compilation_destroy_003 + * @tc.desc: Verify the normal model of the OH_NNCompilation_Destroy function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_destroy_003, testing::ext::TestSize.Level0) +{ + InnerModel* innerModel = new InnerModel(); + EXPECT_NE(nullptr, innerModel); + Compilation* compilation = new(std::nothrow) Compilation(innerModel); + EXPECT_NE(nullptr, compilation); + OH_NNCompilation* nnCompilation = reinterpret_cast(compilation); + OH_NNCompilation_Destroy(&nnCompilation); + EXPECT_EQ(nullptr, nnCompilation); +} + +/** + * @tc.name: excutor_construct_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNExecutor_Construct function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_construct_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetEnableFp16(true)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPerformance(OH_NN_PERFORMANCE_EXTREME)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPriority(OH_NN_PRIORITY_HIGH)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.Build()); + + OH_NNCompilation* nnCompilation = nullptr; + OH_NNExecutor* executor = OH_NNExecutor_Construct(nnCompilation); + EXPECT_EQ(nullptr, executor); +} + +/** + * @tc.name: excutor_construct_002 + * @tc.desc: Verify the not OH_NNCompilation_Build before creating executor of the OH_NNExecutor_Construct function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_construct_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + OH_NNExecutor * executor = OH_NNExecutor_Construct(nnCompilation); + EXPECT_EQ(nullptr, executor); +} + +/** + * @tc.name: excutor_construct_003 + * @tc.desc: Verify the success of the OH_NNExecutor_Construct function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_construct_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPerformance(OH_NN_PERFORMANCE_EXTREME)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPriority(OH_NN_PRIORITY_HIGH)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetEnableFp16(true)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.Build()); + + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + OH_NNExecutor * executor = OH_NNExecutor_Construct(nnCompilation); + EXPECT_NE(nullptr, executor); +} + +/** + * @tc.name: excutor_setinput_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_SetInput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_001, testing::ext::TestSize.Level0) +{ + SetTensor(); + + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + const void *buffer = input; + size_t length = 2 * sizeof(float); + uint32_t inputIndex = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInput(nullptr, inputIndex, &m_tensor, buffer, length)); +} + +/** + * @tc.name: excutor_setinput_002 + * @tc.desc: Verify the OH_NN_Tensor is nullptr of the OH_NNExecutor_SetInput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_002, 
testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + const void *buffer = input; + size_t length = 2 * sizeof(float); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInput(nnExecutor, inputIndex, nullptr, buffer, length)); +} + +/** + * @tc.name: excutor_setinput_003 + * @tc.desc: Verify the data is nullptr of the OH_NNExecutor_SetInput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetTensor(); + + uint32_t inputIndex = 0; + const void *buffer = nullptr; + size_t length = 2 * sizeof(float); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInput(nnExecutor, inputIndex, &m_tensor, buffer, length)); +} + +/** + * @tc.name: excutor_setinput_004 + * @tc.desc: Verify the length is 0 of the OH_NNExecutor_SetInput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + SetTensor(); + + size_t length = 0; + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + const void *buffer = input; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInput(nnExecutor, inputIndex, &m_tensor, buffer, length)); +} + +/** + * @tc.name: excutor_setinput_005 + * @tc.desc: Verify the success of the OH_NNExecutor_SetInput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + SetTensor(); + + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + const void *buffer = input; + size_t length = 9 * sizeof(int32_t); + OH_NN_ReturnCode ret = OH_NNExecutor_SetInput(nnExecutor, inputIndex, &m_tensor, buffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: excutor_setoutput_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_SetOutput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_001, testing::ext::TestSize.Level0) +{ + uint32_t outputIndex = 0; + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void *buffer = input; + size_t length = 9 * sizeof(int32_t); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(nullptr, outputIndex, buffer, length)); +} + +/** + * @tc.name: excutor_setoutput_002 + * @tc.desc: Verify the data is nullptr of the OH_NNExecutor_SetOutput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + 
Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + void *buffer = nullptr; + size_t length = 9 * sizeof(int32_t); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(nnExecutor, outputIndex, buffer, length)); +} + +/** + * @tc.name: excutor_setoutput_003 + * @tc.desc: Verify the length is 0 of the OH_NNExecutor_SetOutput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void *buffer = input; + size_t length = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(nnExecutor, outputIndex, buffer, length)); +} + +/** + * @tc.name: excutor_setoutput_004 + * @tc.desc: Verify the success of the OH_NNExecutor_SetOutput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void *buffer = input; + size_t length = 9 * sizeof(int32_t); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetOutput(nnExecutor, outputIndex, buffer, length)); +} + +/** + * @tc.name: excutor_getoutputshape_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_GetOutputShape function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = nullptr; + + SetInputAndOutput(executor); + + int32_t* ptr = nullptr; + int32_t** shape = &ptr; + uint32_t length = 2; + uint32_t outputIndex = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, + shape, &length)); +} + +/** + * @tc.name: excutor_getoutputshape_002 + * @tc.desc: Verify the shape is nullptr of the OH_NNExecutor_GetOutputShape function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetInputAndOutput(executor); + + uint32_t outputIndex = 0; + int32_t** shape = nullptr; + uint32_t length = 2; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, + shape, &length)); +} + +/** + * @tc.name: excutor_getoutputshape_003 + * @tc.desc: Verify the *shape is not nullptr of the OH_NNExecutor_GetOutputShape function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + 
OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetInputAndOutput(executor); + + int32_t expectDim[2] = {3, 3}; + int32_t* ptr = expectDim; + int32_t** shape = &ptr; + uint32_t length = 2; + uint32_t outputIndex = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, + shape, &length)); +} + +/** + * @tc.name: excutor_getoutputshape_004 + * @tc.desc: Verify the length is nullptr of the OH_NNExecutor_GetOutputShape function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetInputAndOutput(executor); + + int32_t* ptr = nullptr; + int32_t** shape = &ptr; + uint32_t outputIndex = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, shape, nullptr)); +} + +/** + * @tc.name: excutor_getoutputshape_005 + * @tc.desc: Verify the success of the OH_NNExecutor_GetOutputShape function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetInputAndOutput(executor); + + int32_t* ptr = nullptr; + int32_t** shape = &ptr; + uint32_t length = 2; + uint32_t outputIndex = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, shape, &length)); +} + +/** + * @tc.name: excutor_run_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_Run function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_run_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_Run(nnExecutor)); +} + +/** + * @tc.name: excutor_run_002 + * @tc.desc: Verify the success of the OH_NNExecutor_Run function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_run_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t index = 0; + size_t length = 9 * sizeof(int32_t); + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void *buffer = input; + + SetTensor(); + EXPECT_EQ(OH_NN_SUCCESS, executor.SetInput(index, m_tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executor.SetOutput(index, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(nnExecutor)); +} + +/* + * @tc.name: executor_allocate_input_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_AllocateInputMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + uint32_t outputIndex = 0; + size_t length = 9 * sizeof(float); + + OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, outputIndex, length); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_input_memory_002 + * @tc.desc: Verify the passed length equals 0 of the OH_NNExecutor_AllocateInputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + size_t length = 0; + + OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, outputIndex, length); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_input_memory_003 + * @tc.desc: Verify the error when creating input memory in executor of the OH_NNExecutor_AllocateInputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 6; + size_t length = 9 * sizeof(float); + + OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, outputIndex, length); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_input_memory_004 + * @tc.desc: Verify the success of the OH_NNExecutor_AllocateInputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + size_t length = 9 * sizeof(float); + + OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, outputIndex, length); + EXPECT_NE(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_output_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_AllocateOutputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + uint32_t outputIndex = 0; + size_t length = 9 * sizeof(float); + + OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_output_memory_002 + * @tc.desc: Verify the passed length equals 0 of the OH_NNExecutor_AllocateOutputMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + size_t length = 0; + + OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_output_memory_003 + * @tc.desc: Verify the error when create output memory in executor of the OH_NNExecutor_AllocateOutputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 6; + size_t length = 9 * sizeof(float); + + OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_output_memory_004 + * @tc.desc: Verify the success of the OH_NNExecutor_AllocateOutputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + size_t length = 9 * sizeof(float); + + OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length); + EXPECT_NE(nullptr, ret); +} + + +/* + * @tc.name: executor_destroy_input_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_DestroyInputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildModelGraph(innerModel); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = nullptr; + + uint32_t inputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_Memory* pMemory = &memory; + size_t length = 9 * sizeof(float); + EXPECT_EQ(OH_NN_SUCCESS, executor.CreateInputMemory(inputIndex, length, &pMemory)); + OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, &pMemory); + EXPECT_EQ(nullptr, nnExecutor); +} + +/* + * @tc.name: executor_destroy_input_memory_002 + * @tc.desc: Verify the memory is nullptr of the OH_NNExecutor_DestroyInputMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildModelGraph(innerModel); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + OH_NN_Memory** memory = nullptr; + OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, memory); + EXPECT_EQ(nullptr, memory); +} + +/* + * @tc.name: executor_destroy_input_memory_003 + * @tc.desc: Verify the *memory is nullptr of the OH_NNExecutor_DestroyInputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildModelGraph(innerModel); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + OH_NN_Memory* memory = nullptr; + OH_NN_Memory** pMemory = &memory; + OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, pMemory); + EXPECT_EQ(nullptr, memory); +} + +/* + * @tc.name: executor_destroy_input_memory_004 + * @tc.desc: Verify the error happened when destroying input memory of the OH_NNExecutor_DestroyInputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildModelGraph(innerModel); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 6; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_Memory* pMemory = &memory; + OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, &pMemory); + EXPECT_NE(nullptr, pMemory); +} + +/* + * @tc.name: executor_destroy_input_memory_005 + * @tc.desc: Verify the success of the OH_NNExecutor_DestroyInputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildModelGraph(innerModel); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_Memory* pMemory = &memory; + size_t length = 9 * sizeof(float); + EXPECT_EQ(OH_NN_SUCCESS, executor.CreateInputMemory(inputIndex, length, &pMemory)); + OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, &pMemory); + EXPECT_EQ(nullptr, pMemory); +} + +/* + * @tc.name: executor_destroy_output_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_DestroyOutputMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + uint32_t outputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_Memory* pMemory = &memory; + OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, &pMemory); + EXPECT_EQ(nullptr, nnExecutor); +} + +/* + * @tc.name: executor_destroy_output_memory_002 + * @tc.desc: Verify the memory is nullptr of the OH_NNExecutor_DestroyOutputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + OH_NN_Memory** memory = nullptr; + OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, memory); + EXPECT_EQ(nullptr, memory); +} + +/* + * @tc.name: executor_destroy_output_memory_003 + * @tc.desc: Verify the *memory is nullptr of the OH_NNExecutor_DestroyOutputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + OH_NN_Memory* memory = nullptr; + OH_NN_Memory** pMemory = &memory; + OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, pMemory); + EXPECT_EQ(nullptr, memory); +} + +/* + * @tc.name: executor_destroy_output_memory_004 + * @tc.desc: Verify the error happened when destroying output memory of the OH_NNExecutor_DestroyOutputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 6; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_Memory* pMemory = &memory; + OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, &pMemory); + EXPECT_NE(nullptr, pMemory); +} + +/* + * @tc.name: executor_destroy_output_memory_005 + * @tc.desc: Verify the success of the OH_NNExecutor_DestroyOutputMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_Memory* pMemory = &memory; + size_t length = 9 * sizeof(float); + uint32_t outputIndex = 0; + EXPECT_EQ(OH_NN_SUCCESS, executor.CreateOutputMemory(outputIndex, length, &pMemory)); + OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, &pMemory); + EXPECT_EQ(nullptr, pMemory); +} + +/* + * @tc.name: executor_set_input_with_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_SetInputWithMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + + SetTensor(); + + uint32_t inputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, &m_tensor, &memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_with_memory_002 + * @tc.desc: Verify the operand is nullptr of the OH_NNExecutor_SetInputWithMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + OH_NN_Tensor* operand = nullptr; + + uint32_t inputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, operand, &memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_with_memory_003 + * @tc.desc: Verify the memory is nullptr of the OH_NNExecutor_SetInputWithMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetTensor(); + + uint32_t inputIndex = 0; + OH_NN_Memory* memory = nullptr; + OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, &m_tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_with_memory_004 + * @tc.desc: Verify the success of the OH_NNExecutor_SetInputWithMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetTensor(); + + uint32_t inputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, &m_tensor, &memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + + +/* + * @tc.name: executor_set_output_with_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_SetOutputWithMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + uint32_t outputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_ReturnCode ret = OH_NNExecutor_SetOutputWithMemory(nnExecutor, outputIndex, &memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_with_memory_002 + * @tc.desc: Verify the memory is nullptr of the OH_NNExecutor_SetOutputWithMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + OH_NN_Memory* memory = nullptr; + OH_NN_ReturnCode ret = OH_NNExecutor_SetOutputWithMemory(nnExecutor, outputIndex, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_with_memory_003 + * @tc.desc: Verify the success of the OH_NNExecutor_SetOutputWithMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_ReturnCode ret = OH_NNExecutor_SetOutputWithMemory(nnExecutor, outputIndex, &memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_destroy_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor** pExecutor = nullptr; + OH_NNExecutor_Destroy(pExecutor); + EXPECT_EQ(nullptr, pExecutor); +} + +/* + * @tc.name: executor_destroy_002 + * @tc.desc: Verify the *OH_NNExecutor is nullptr of the OH_NNExecutor_Destroy function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_002, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + OH_NNExecutor** pExecutor = &nnExecutor; + OH_NNExecutor_Destroy(pExecutor); + EXPECT_EQ(nullptr, nnExecutor); +} + +/* + * @tc.name: executor_destroy_003 + * @tc.desc: Verify the normal model of the OH_NNExecutor_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_003, testing::ext::TestSize.Level0) +{ + InnerModel* innerModel = new InnerModel(); + EXPECT_NE(nullptr, innerModel); + Compilation* innerCompilation = new(std::nothrow) Compilation(innerModel); + EXPECT_NE(nullptr, innerCompilation); + Executor* executor = new(std::nothrow) Executor(innerCompilation); + EXPECT_NE(nullptr, executor); + + OH_NNExecutor* nnExecutor = reinterpret_cast(executor); + OH_NNExecutor_Destroy(&nnExecutor); + EXPECT_EQ(nullptr, nnExecutor); +} + +/* + * @tc.name: device_get_all_devices_id_001 + * @tc.desc: Verify the allDevicesID is nullptr of the OH_NNDevice_GetAllDevicesID function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_001, testing::ext::TestSize.Level0) +{ + const size_t** allDevicesId = nullptr; + uint32_t deviceCount = 1; + uint32_t* pDeviceCount = &deviceCount; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(allDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_all_devices_id_002 + * @tc.desc: Verify the *allDevicesID is not nullptr of the OH_NNDevice_GetAllDevicesID function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_002, testing::ext::TestSize.Level0) +{ + const size_t devicesId = 1; + const size_t* allDevicesId = &devicesId; + const size_t** pAllDevicesId = &allDevicesId; + uint32_t deviceCount = 1; + uint32_t* pDeviceCount = &deviceCount; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(pAllDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_all_devices_id_003 + * @tc.desc: Verify the deviceCount is nullptr of the OH_NNDevice_GetAllDevicesID function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_003, testing::ext::TestSize.Level0) +{ + const size_t* allDevicesId = nullptr; + const size_t** pAllDevicesId = &allDevicesId; + uint32_t* pDeviceCount = nullptr; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(pAllDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_all_devices_id_004 + * @tc.desc: Verify the get no device of the OH_NNDevice_GetAllDevicesID function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_004, testing::ext::TestSize.Level0) +{ + const size_t* allDevicesId = nullptr; + const size_t** pAllDevicesId = &allDevicesId; + uint32_t deviceCount = 1; + uint32_t* pDeviceCount = &deviceCount; + OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(pAllDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: device_get_all_devices_id_005 + * @tc.desc: Verify the success of the OH_NNDevice_GetAllDevicesID function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_005, testing::ext::TestSize.Level0) +{ + const size_t* allDevicesId = nullptr; + const size_t** pAllDevicesId = &allDevicesId; + uint32_t deviceCount = 1; + uint32_t* pDeviceCount = &deviceCount; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(pAllDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: device_get_name_001 + * @tc.desc: Verify the name is nullptr of the OH_NNDevice_GetName function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_name_001, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + const char **name = nullptr; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, name); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_name_002 + * @tc.desc: Verify the *name is not nullptr of the OH_NNDevice_GetName function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_name_002, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + const char* name = "diviceId"; + const char** pName = &name; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, pName); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_name_003 + * @tc.desc: Verify the error happened when getting name of deviceID of the OH_NNDevice_GetName function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_name_003, testing::ext::TestSize.Level0) +{ + size_t deviceID = 0; + const char* name = nullptr; + const char** pName = &name; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, pName); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: device_get_name_004 + * @tc.desc: Verify the success of the OH_NNDevice_GetName function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_name_004, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + const char* name = nullptr; + const char** pName = &name; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, pName); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: device_get_type_001 + * @tc.desc: Verify the device is nullptr of the OH_NNDevice_GetType function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_type_001, testing::ext::TestSize.Level0) +{ + size_t deviceID = 0; + OH_NN_DeviceType deviceType = OH_NN_CPU; + OH_NN_DeviceType* pDeviceType = &deviceType; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, pDeviceType); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_type_002 + * @tc.desc: Verify the OH_NN_DeviceType is nullptr of the OH_NNDevice_GetType function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_type_002, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + OH_NN_DeviceType* pDeviceType = nullptr; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, pDeviceType); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_type_003 + * @tc.desc: Verify the error happened when getting name of deviceID of the OH_NNDevice_GetType function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_type_003, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + OH_NN_DeviceType deviceType = OH_NN_OTHERS; + OH_NN_DeviceType* pDeviceType = &deviceType; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, pDeviceType); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, ret); +} + +/* + * @tc.name: device_get_type_004 + * @tc.desc: Verify the success of the OH_NNDevice_GetType function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_type_004, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + OH_NN_DeviceType deviceType = OH_NN_CPU; + OH_NN_DeviceType* pDeviceType = &deviceType; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, pDeviceType); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} +} // namespace Unittest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.h b/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.h new file mode 100644 index 0000000000000000000000000000000000000000..61f1ed2b157af6f9b546b11c490770971a984ba4 --- /dev/null +++ b/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_UNITTEST_H +#define NEURAL_NETWORK_RUNTIME_UNITTEST_H + +#include + +#include "interfaces/kits/c/neural_network_runtime.h" +#include "frameworks/native/inner_model.h" +#include "frameworks/native/executor.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Unittest { +class NeuralNetworkRuntimeTest : public testing::Test { +public: + OH_NN_ReturnCode BuildModelGraph(InnerModel& innerModel); + void InitIndices(); + void AddModelTensor(InnerModel& innerModel); + void SetInnerBuild(InnerModel& innerModel); + void SetExecutor(Executor& executor); + void SetInputAndOutput(Executor& executor); + void SetTensor(); + +public: + OH_NN_UInt32Array m_inputIndices; + OH_NN_UInt32Array m_outputIndices; + OH_NN_UInt32Array m_paramIndices; + OH_NN_Tensor m_tensor; + + uint32_t m_inputIndexs[2]{0, 1}; + uint32_t m_outputIndexs[1]{2}; + uint32_t m_paramIndexs[1]{3}; +}; +} // namespace Unittest +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_UNITTEST_H diff --git a/test/unittest/components/transform/transform_test.cpp b/test/unittest/components/transform/transform_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ae9e4cada1ea40178c467d5a8d9792dd2bcc14d3 --- /dev/null +++ b/test/unittest/components/transform/transform_test.cpp @@ -0,0 +1,912 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "frameworks/native/transform.h" +#include "frameworks/native/memory_manager.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class TransformTestTest : public testing::Test { +public: + TransformTestTest() = default; + ~TransformTestTest() = default; +}; + +/** + * @tc.name: transform_transhdidevicetype_001 + * @tc.desc: Verify the TransHDIDeviceType function return OH_NN_CPU + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transhdidevicetype_001, TestSize.Level0) +{ + V1_0::DeviceType iDeviceType = V1_0::DeviceType::CPU; + OH_NN_DeviceType result = HDIToNN::TransHDIDeviceType(iDeviceType); + EXPECT_EQ(OH_NN_CPU, result); +} + +/** + * @tc.name: transform_transhdidevicetype_002 + * @tc.desc: Verify the TransHDIDeviceType function return OH_NN_GPU + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transhdidevicetype_002, TestSize.Level0) +{ + V1_0::DeviceType iDeviceType = V1_0::DeviceType::GPU; + OH_NN_DeviceType result = HDIToNN::TransHDIDeviceType(iDeviceType); + EXPECT_EQ(OH_NN_GPU, result); +} + +/** + * @tc.name: transform_transhdidevicetype_003 + * @tc.desc: Verify the TransHDIDeviceType function return OH_NN_ACCELERATOR + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transhdidevicetype_003, TestSize.Level0) +{ + V1_0::DeviceType iDeviceType = V1_0::DeviceType::ACCELERATOR; + OH_NN_DeviceType result = HDIToNN::TransHDIDeviceType(iDeviceType); + EXPECT_EQ(OH_NN_ACCELERATOR, result); +} + +/** + * @tc.name: transform_transhdidevicetype_004 + * @tc.desc: Verify the TransHDIDeviceType function return OH_NN_OTHERS + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transhdidevicetype_004, TestSize.Level0) +{ + V1_0::DeviceType iDeviceType = V1_0::DeviceType::OTHER; + OH_NN_DeviceType result = HDIToNN::TransHDIDeviceType(iDeviceType); + EXPECT_EQ(OH_NN_OTHERS, result); +} + +/** + * @tc.name: transform_transhdidevicestatus_001 + * @tc.desc: Verify the TransHDIDeviceStatus function return AVAILABLE + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transhdidevicestatus_001, TestSize.Level0) +{ + V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::AVAILABLE; + DeviceStatus result = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); + EXPECT_EQ(DeviceStatus::AVAILABLE, result); +} + +/** + * @tc.name: transform_transhdidevicestatus_002 + * @tc.desc: Verify the TransHDIDeviceStatus function return BUSY. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transhdidevicestatus_002, TestSize.Level0) +{ + V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::BUSY; + DeviceStatus result = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); + EXPECT_EQ(DeviceStatus::BUSY, result); +} + +/** + * @tc.name: transform_transhdidevicestatus_003 + * @tc.desc: Verify the TransHDIDeviceStatus function return OFFLINE. 
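 *
 * HDIToNN::TransHDIDeviceType and TransHDIDeviceStatus map the HDI V1_0 enums reported
 * by a driver onto the runtime-facing enums one-to-one. A short sketch, assuming only
 * the signatures exercised in these cases:
 *
 *     V1_0::DeviceType hdiType = V1_0::DeviceType::GPU;              // as reported by the HDI device
 *     V1_0::DeviceStatus hdiStatus = V1_0::DeviceStatus::AVAILABLE;
 *     OH_NN_DeviceType nnType = HDIToNN::TransHDIDeviceType(hdiType);        // OH_NN_GPU
 *     DeviceStatus nnStatus = HDIToNN::TransHDIDeviceStatus(hdiStatus);      // DeviceStatus::AVAILABLE
 *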
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transhdidevicestatus_003, TestSize.Level0) +{ + V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::OFFLINE; + DeviceStatus result = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); + EXPECT_EQ(DeviceStatus::OFFLINE, result); +} + +/** + * @tc.name: transform_transhdidevicestatus_004 + * @tc.desc: Verify the TransHDIDeviceStatus function return UNKNOWN. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transhdidevicestatus_004, TestSize.Level0) +{ + V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::UNKNOWN; + DeviceStatus result = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); + EXPECT_EQ(DeviceStatus::UNKNOWN, result); +} + +/** + * @tc.name: transform_transperformancemode_001 + * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_LOW. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transperformancemode_001, TestSize.Level0) +{ + OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_LOW; + V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); + EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_LOW, result); +} + +/** + * @tc.name: transform_transperformancemode_002 + * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_MEDIUM. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transperformancemode_002, TestSize.Level0) +{ + OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_MEDIUM; + V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); + EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_MEDIUM, result); +} + +/** + * @tc.name: transform_transperformancemode_003 + * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_HIGH. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transperformancemode_003, TestSize.Level0) +{ + OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_HIGH; + V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); + EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_HIGH, result); +} + +/** + * @tc.name: transform_transperformancemode_004 + * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_EXTREME. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transperformancemode_004, TestSize.Level0) +{ + OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_EXTREME; + V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); + EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_EXTREME, result); +} + +/** + * @tc.name: transform_transperformancemode_005 + * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_NONE. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transperformancemode_005, TestSize.Level0) +{ + OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_NONE; + V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); + EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_NONE, result); +} + +/** + * @tc.name: transform_transpriority_001 + * @tc.desc: Verify the TransPriority function return PRIORITY_LOW. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transpriority_001, TestSize.Level0) +{ + OH_NN_Priority priority = OH_NN_PRIORITY_LOW; + V1_0::Priority result = NNToHDI::TransPriority(priority); + EXPECT_EQ(V1_0::Priority::PRIORITY_LOW, result); +} + +/** + * @tc.name: transform_transpriority_002 + * @tc.desc: Verify the TransPriority function return PRIORITY_MEDIUM. 
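 *
 * TransPerformanceMode and TransPriority convert in the opposite direction: user-facing
 * OH_NN_* options into the V1_0 enums handed to the driver. A one-line sketch each,
 * using only the signatures tested here (how the results are forwarded to the device is
 * outside this file):
 *
 *     V1_0::PerformanceMode perf = NNToHDI::TransPerformanceMode(OH_NN_PERFORMANCE_HIGH); // PERFORMANCE_HIGH
 *     V1_0::Priority prio = NNToHDI::TransPriority(OH_NN_PRIORITY_MEDIUM);                // PRIORITY_MEDIUM
 *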
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transpriority_002, TestSize.Level0) +{ + OH_NN_Priority priority = OH_NN_PRIORITY_MEDIUM; + V1_0::Priority result = NNToHDI::TransPriority(priority); + EXPECT_EQ(V1_0::Priority::PRIORITY_MEDIUM, result); +} + +/** + * @tc.name: transform_transpriority_003 + * @tc.desc: Verify the TransPriority function return PRIORITY_HIGH. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transpriority_003, TestSize.Level0) +{ + OH_NN_Priority priority = OH_NN_PRIORITY_HIGH; + V1_0::Priority result = NNToHDI::TransPriority(priority); + EXPECT_EQ(V1_0::Priority::PRIORITY_HIGH, result); +} + +/** + * @tc.name: transform_transdatatype_001 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_BOOL. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_001, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_BOOL; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_BOOL, result); +} + +/** + * @tc.name: transform_transdatatype_002 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_002, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT8; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT8, result); +} + +/** + * @tc.name: transform_transdatatype_003 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_003, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT16; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT16, result); +} + +/** + * @tc.name: transform_transdatatype_004 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_004, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT32; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT32, result); +} + +/** + * @tc.name: transform_transdatatype_005 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_005, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT64; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT64, result); +} + +/** + * @tc.name: transform_transdatatype_006 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_006, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT8; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_UINT8, result); +} + +/** + * @tc.name: transform_transdatatype_007 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_007, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT16; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_UINT16, result); +} + +/** + * @tc.name: transform_transdatatype_008 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT32. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_008, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT32; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_UINT32, result); +} + +/** + * @tc.name: transform_transdatatype_009 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_009, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT64; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_UINT64, result); +} + +/** + * @tc.name: transform_transdatatype_010 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_010, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT16; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_FLOAT16, result); +} + +/** + * @tc.name: transform_transdatatype_011 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_011, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT32; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_FLOAT32, result); +} + +/** + * @tc.name: transform_transdatatype_012 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UNKNOWN. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_012, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UNKNOWN; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_UNKNOWN, result); +} + +/** + * @tc.name: transform_transdatatype_013 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_013, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT64; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_FLOAT64, result); +} + +/** + * @tc.name: transform_transformat_001 + * @tc.desc: Verify the TransFormat function return FORMAT_NCHW. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transformat_001, TestSize.Level0) +{ + OH_NN_Format format = OH_NN_FORMAT_NCHW; + V1_0::Format result = NNToHDI::TransFormat(format); + EXPECT_EQ(V1_0::Format::FORMAT_NCHW, result); +} + +/** + * @tc.name: transform_transformat_002 + * @tc.desc: Verify the TransFormat function return FORMAT_NHWC. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transformat_002, TestSize.Level0) +{ + OH_NN_Format format = OH_NN_FORMAT_NHWC; + V1_0::Format result = NNToHDI::TransFormat(format); + EXPECT_EQ(V1_0::Format::FORMAT_NHWC, result); +} + +/** + * @tc.name: transform_transformat_003 + * @tc.desc: Verify the TransFormat function return FORMAT_NONE. 
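 *
 * TransDataType and TransFormat are presumably the per-field conversions behind
 * TransIOTensor (tested just below). A minimal sketch, using only signatures exercised
 * in this file:
 *
 *     V1_0::DataType hdiType = NNToHDI::TransDataType(OH_NN_FLOAT32);    // DATA_TYPE_FLOAT32
 *     V1_0::Format hdiFormat = NNToHDI::TransFormat(OH_NN_FORMAT_NHWC);  // FORMAT_NHWC
 *     IOTensor tensor;
 *     tensor.dataType = OH_NN_INT8;
 *     V1_0::IOTensor hdiTensor = NNToHDI::TransIOTensor(tensor);         // dataType == DATA_TYPE_INT8
 *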
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transformat_003, TestSize.Level0) +{ + OH_NN_Format format = OH_NN_FORMAT_NONE; + V1_0::Format result = NNToHDI::TransFormat(format); + EXPECT_EQ(V1_0::Format::FORMAT_NONE, result); +} + +/** + * @tc.name: transform_transiotensor_001 + * @tc.desc: Verify the TransIOTensor function return int8 + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transiotensor_001, TestSize.Level0) +{ + IOTensor tensor; + tensor.dataType = OH_NN_INT8; + V1_0::IOTensor result = NNToHDI::TransIOTensor(tensor); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT8, result.dataType); +} + +/** + * @tc.name: transform_gettypesize_001 + * @tc.desc: Verify the GetTypeSize function return 1. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_gettypesize_001, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_BOOL; + uint32_t result = GetTypeSize(dataType); + EXPECT_EQ(static_cast<uint32_t>(1), result); +} + +/** + * @tc.name: transform_gettypesize_002 + * @tc.desc: Verify the GetTypeSize function return 2. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_gettypesize_002, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT16; + uint32_t result = GetTypeSize(dataType); + EXPECT_EQ(static_cast<uint32_t>(2), result); +} + +/** + * @tc.name: transform_gettypesize_003 + * @tc.desc: Verify the GetTypeSize function return 4. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_gettypesize_003, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT32; + uint32_t result = GetTypeSize(dataType); + EXPECT_EQ(static_cast<uint32_t>(4), result); +} + +/** + * @tc.name: transform_gettypesize_004 + * @tc.desc: Verify the GetTypeSize function return 8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_gettypesize_004, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT64; + uint32_t result = GetTypeSize(dataType); + EXPECT_EQ(static_cast<uint32_t>(8), result); +} + +/** + * @tc.name: transform_gettypesize_005 + * @tc.desc: Verify the GetTypeSize function return 0. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_gettypesize_005, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UNKNOWN; + uint32_t result = GetTypeSize(dataType); + EXPECT_EQ(static_cast<uint32_t>(0), result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_001 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_BOOL. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_001, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_BOOL; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_BOOL, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_002 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_002, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT8; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_INT8, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_003 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT16.
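 *
 * The GetTypeSize cases above report the element size in bytes, which is how a tensor
 * buffer length can be derived from its shape. A small worked sketch (the length
 * computation is illustrative, not code from this patch):
 *
 *     std::vector<int32_t> dims = {3, 3};
 *     uint32_t elemSize = GetTypeSize(OH_NN_INT32);   // 4, per transform_gettypesize_003
 *     size_t byteLen = elemSize;
 *     for (int32_t d : dims) {
 *         byteLen *= static_cast<size_t>(d);          // 3 * 3 * 4 = 36 bytes
 *     }
 *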
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_003, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT16; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_INT16, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_004 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_004, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT32; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_INT32, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_005 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_005, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT64; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_INT64, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_006 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_006, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT8; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UINT8, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_007 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_007, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT16; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UINT16, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_008 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_008, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT32; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UINT32, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_009 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_009, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT64; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UINT64, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_010 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_010, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT16; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_FLOAT16, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_011 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT32. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_011, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT32; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_FLOAT32, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_012 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UNKNOWN. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_012, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UNKNOWN; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UNKNOWN, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_013 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT64 + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_013, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT64; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_FLOAT64, result); +} + +/** + * @tc.name: transform_nntoms_transformformat_001 + * @tc.desc: Verify the TransFormat function return FORMAT_NCHW. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformformat_001, TestSize.Level0) +{ + OH_NN_Format format = OH_NN_FORMAT_NCHW; + mindspore::lite::Format result = NNToMS::TransformFormat(format); + EXPECT_EQ(mindspore::lite::FORMAT_NCHW, result); +} + +/** + * @tc.name: transform_nntoms_transformformat_002 + * @tc.desc: Verify the TransFormat function return FORMAT_NHWC. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformformat_002, TestSize.Level0) +{ + OH_NN_Format format = OH_NN_FORMAT_NHWC; + mindspore::lite::Format result = NNToMS::TransformFormat(format); + EXPECT_EQ(mindspore::lite::FORMAT_NHWC, result); +} + +/** + * @tc.name: transform_nntoms_transformformat_003 + * @tc.desc: Verify the TransFormat function return FORMAT_NHWC. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformformat_003, TestSize.Level0) +{ + OH_NN_Format format = OH_NN_FORMAT_NONE; + mindspore::lite::Format result = NNToMS::TransformFormat(format); + EXPECT_EQ(mindspore::lite::FORMAT_NHWC, result); +} + +/** + * @tc.name: transform_nntoms_transformfusiontype_001 + * @tc.desc: Verify the TransFormat function return ACTIVATION_TYPE_NO_ACTIVATION. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformfusiontype_001, TestSize.Level0) +{ + OH_NN_FuseType type = OH_NN_FUSED_NONE; + mindspore::lite::ActivationType result = NNToMS::TransfromFusionType(type); + EXPECT_EQ(mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION, result); +} + +/** + * @tc.name: transform_nntoms_transformfusiontype_002 + * @tc.desc: Verify the TransFormat function return ACTIVATION_TYPE_RELU. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformfusiontype_002, TestSize.Level0) +{ + OH_NN_FuseType type = OH_NN_FUSED_RELU; + mindspore::lite::ActivationType result = NNToMS::TransfromFusionType(type); + EXPECT_EQ(mindspore::lite::ACTIVATION_TYPE_RELU, result); +} + +/** + * @tc.name: transform_nntoms_transformfusiontype_003 + * @tc.desc: Verify the TransFormat function return ACTIVATION_TYPE_RELU6. 
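 *
 * TransfromFusionType (the spelling matches the source identifier) presumably lets the
 * op builders translate a fused-activation flag into the MindIR activation attribute.
 * A sketch of the mapping only, based on the cases in this file:
 *
 *     mindspore::lite::ActivationType act = NNToMS::TransfromFusionType(OH_NN_FUSED_RELU);
 *     // act == ACTIVATION_TYPE_RELU; OH_NN_FUSED_NONE maps to ACTIVATION_TYPE_NO_ACTIVATION
 *     // and OH_NN_FUSED_RELU6 to ACTIVATION_TYPE_RELU6
 *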
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformfusiontype_003, TestSize.Level0) +{ + OH_NN_FuseType type = OH_NN_FUSED_RELU6; + mindspore::lite::ActivationType result = NNToMS::TransfromFusionType(type); + EXPECT_EQ(mindspore::lite::ACTIVATION_TYPE_RELU6, result); +} + +/** + * @tc.name: transform_nntoms_transformquanttype_001 + * @tc.desc: Verify the TransFormat function return QUANT_TYPE_NONE. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformquanttype_001, TestSize.Level0) +{ + OHOS::NeuralNetworkRuntime::Ops::OpsQuantType type = OHOS::NeuralNetworkRuntime::Ops::OpsQuantType::QUANT_NONE; + mindspore::lite::QuantType result = NNToMS::TransformQuantType(type); + EXPECT_EQ(mindspore::lite::QUANT_TYPE_NONE, result); +} + +/** + * @tc.name: transform_nntoms_transformquanttype_002 + * @tc.desc: Verify the TransFormat function return QUANT_TYPE_ALL. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformquanttype_002, TestSize.Level0) +{ + OHOS::NeuralNetworkRuntime::Ops::OpsQuantType type = OHOS::NeuralNetworkRuntime::Ops::OpsQuantType::QUANT_ALL; + mindspore::lite::QuantType result = NNToMS::TransformQuantType(type); + EXPECT_EQ(mindspore::lite::QUANT_TYPE_ALL, result); +} + + +/** + * @tc.name: transform_mstonn_transformdatatype_001 + * @tc.desc: Verify the TransIOTensor function return OH_NN_BOOL. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_001, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_BOOL; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_BOOL, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_002 + * @tc.desc: Verify the TransDataType function return OH_NN_INT8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_002, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_INT8; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_INT8, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_003 + * @tc.desc: Verify the TransDataType function return OH_NN_INT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_003, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_INT16; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_INT16, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_004 + * @tc.desc: Verify the TransDataType function return OH_NN_INT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_004, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_INT32; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_INT32, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_005 + * @tc.desc: Verify the TransDataType function return OH_NN_INT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_005, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_INT64; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_INT64, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_006 + * @tc.desc: Verify the TransDataType function return OH_NN_UINT8. 
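 *
 * MSToNN::TransformDataType runs in the reverse direction of NNToMS above, mapping a
 * MindIR tensor's data type back to an OH_NN_DataType (for example when tensor
 * information is read out of a built LiteGraph). A one-line sketch:
 *
 *     OH_NN_DataType nnType = MSToNN::TransformDataType(mindspore::lite::DATA_TYPE_FLOAT32); // OH_NN_FLOAT32
 *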
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_006, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UINT8; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UINT8, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_007 + * @tc.desc: Verify the TransDataType function return OH_NN_UINT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_007, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UINT16; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UINT16, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_008 + * @tc.desc: Verify the TransDataType function return OH_NN_UINT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_008, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UINT32; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UINT32, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_009 + * @tc.desc: Verify the TransDataType function return OH_NN_UINT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_009, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UINT64; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UINT64, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_010 + * @tc.desc: Verify the TransDataType function return OH_NN_FLOAT16 + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_010, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_FLOAT16; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_FLOAT16, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_011 + * @tc.desc: Verify the TransDataType function return OH_NN_FLOAT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_011, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_FLOAT32; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_FLOAT32, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_012 + * @tc.desc: Verify the TransDataType function return OH_NN_UNKNOWN. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_012, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UNKNOWN; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UNKNOWN, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_013 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT64 + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_013, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_FLOAT64; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_FLOAT64, result); +} + +/** + * @tc.name: transform_mstonn_transformquantparams_001 + * @tc.desc: Verify the TransformQuantParams function. 
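 *
 * TransformQuantParams converts a vector of MindIR quantization parameters into the
 * runtime's QuantParam list, preserving the element count (which is what the case below
 * checks). A sketch; the element and return types here are assumptions inferred from the
 * literal used below, not spelled out in this patch:
 *
 *     std::vector<mindspore::lite::QuantParam> msQuantParams = {{1, 1.0, 8}};
 *     std::vector<QuantParam> nnQuantParams = MSToNN::TransformQuantParams(msQuantParams);
 *     // nnQuantParams.size() == msQuantParams.size()
 *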
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformquantparams_001, TestSize.Level0) +{ + std::vector msQuantParams = {{1, 1.0, 8}}; + std::vector result = MSToNN::TransformQuantParams(msQuantParams); + EXPECT_EQ(msQuantParams.size(), result.size()); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/inner_kits/BUILD.gn b/test/unittest/inner_kits/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..bea6436c0d99902b952809288bd24a97cb9f9acc --- /dev/null +++ b/test/unittest/inner_kits/BUILD.gn @@ -0,0 +1,64 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build/test.gni") + +module_output_path = "neural_network_runtime/" + +config("module_private_config") { + visibility = [ ":*" ] + + include_dirs = [ + "//third_party/googletest/googlemock/include", + "//foundation/ai/neural_network_runtime", + "//foundation/communication/ipc/interfaces/innerkits/ipc_core/include", + "//third_party/mindspore/mindspore/lite/mindir/include" + ] + + cflags = [ + "-Wall", + "-Wextra", + "-Werror", + "--coverage", + ] + + ldflags = [ + "--coverage", + ] +} + +ohos_unittest("NeuralNetworkRuntimeInnerTest") { + module_out_path = module_output_path + sources = ["//foundation/ai/neural_network_runtime/test/unittest/inner_kits/neural_network_runtime_inner_test.cpp"] + + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "c_utils:utils", + "hdf_core:libhdf_utils", + "mindspore:mindir" + ] +} + +group("inner_kits_unittest") { + testonly = true + deps = [ ":NeuralNetworkRuntimeInnerTest" ] +} diff --git a/test/unittest/inner_kits/neural_network_runtime_inner_test.cpp b/test/unittest/inner_kits/neural_network_runtime_inner_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..31fba4c97f2190470b90c63e06846854c97dff98 --- /dev/null +++ b/test/unittest/inner_kits/neural_network_runtime_inner_test.cpp @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "neural_network_runtime_inner_test.h" + +#include "mindir.h" +#include "frameworks/native/inner_model.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Unittest { +void NeuralNetworkRuntimeInnerTest::SetUpTestCase(void) +{ +} + +void NeuralNetworkRuntimeInnerTest::TearDownTestCase(void) +{ +} + +void NeuralNetworkRuntimeInnerTest::SetUp(void) +{ +} + +void NeuralNetworkRuntimeInnerTest::TearDown(void) +{ +} + +/* + * @tc.name: build_from_lite_graph_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_BuildFromLiteGraph function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeInnerTest, build_from_lite_graph_001, testing::ext::TestSize.Level0) +{ + OH_NNModel* model = nullptr; + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph; + EXPECT_NE(nullptr, liteGraph); + OH_NN_ReturnCode ret = OH_NNModel_BuildFromLiteGraph(model, liteGraph); + delete liteGraph; + liteGraph = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: build_from_lite_graph_002 + * @tc.desc: Verify the liteGraph is nullptr of the OH_NNModel_BuildFromLiteGraph function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeInnerTest, build_from_lite_graph_002, testing::ext::TestSize.Level0) +{ + OHOS::NeuralNetworkRuntime::InnerModel* innerModel = new (std::nothrow) OHOS::NeuralNetworkRuntime::InnerModel(); + EXPECT_NE(nullptr, innerModel); + OH_NNModel* model = reinterpret_cast(innerModel); + const void* liteGraph = nullptr; + OH_NN_ReturnCode ret = OH_NNModel_BuildFromLiteGraph(model, liteGraph); + delete innerModel; + innerModel = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: build_from_lite_graph_003 + * @tc.desc: Verify the success of the OH_NNModel_BuildFromLiteGraph function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeInnerTest, build_from_lite_graph_003, testing::ext::TestSize.Level0) +{ + OHOS::NeuralNetworkRuntime::InnerModel* innerModel = new (std::nothrow) OHOS::NeuralNetworkRuntime::InnerModel(); + EXPECT_NE(nullptr, innerModel); + OH_NNModel* model = reinterpret_cast(innerModel); + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph; + EXPECT_NE(nullptr, liteGraph); + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_ = {0}; + liteGraph->output_indices_ = {1}; + const std::vector quant_params {}; + for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) { + const std::vector dim = {3, 3}; + const std::vector data(36, 1); + + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) { + const std::vector dimOut = {3, 3}; + const std::vector dataOut(36, 1); + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, dimOut, mindspore::lite::FORMAT_NCHW, dataOut, quant_params)); + } + OH_NN_ReturnCode ret = OH_NNModel_BuildFromLiteGraph(model, liteGraph); + delete innerModel; + innerModel = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: build_from_lite_graph_004 + * @tc.desc: Verify that the liteGraph parameter passed to the OH_NNModel_BuildFromLiteGraph function is invalid. 
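 *
 * Read together with build_from_lite_graph_003 above: the inner API appears to accept a
 * LiteGraph only when its input/output indices refer to tensors present in all_tensors_;
 * a graph carrying just a name (as in this case) is rejected. A condensed sketch of the
 * minimum viable graph, mirroring case 003 (dim, data and quant_params as defined there):
 *
 *     auto* graph = new (std::nothrow) mindspore::lite::LiteGraph;
 *     graph->name_ = "testGraph";
 *     graph->input_indices_ = {0};
 *     graph->output_indices_ = {1};
 *     // one MindIR tensor per referenced index, e.g. a 3x3 FLOAT32 tensor
 *     graph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(graph->name_,
 *         mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params));
 *     OH_NN_ReturnCode ret = OH_NNModel_BuildFromLiteGraph(model, graph);  // model from an InnerModel, as in case 003
 *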
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeInnerTest, build_from_lite_graph_004, testing::ext::TestSize.Level0) +{ + OHOS::NeuralNetworkRuntime::InnerModel* innerModel = new (std::nothrow) OHOS::NeuralNetworkRuntime::InnerModel(); + EXPECT_NE(nullptr, innerModel); + OH_NNModel* model = reinterpret_cast(innerModel); + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph; + EXPECT_NE(nullptr, liteGraph); + liteGraph->name_ = "testGraph"; + OH_NN_ReturnCode ret = OH_NNModel_BuildFromLiteGraph(model, liteGraph); + delete innerModel; + delete liteGraph; + innerModel = nullptr; + liteGraph = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} +} // namespace Unittest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/inner_kits/neural_network_runtime_inner_test.h b/test/unittest/inner_kits/neural_network_runtime_inner_test.h new file mode 100644 index 0000000000000000000000000000000000000000..03d7d923dc9e2910551d95606c4228731f935551 --- /dev/null +++ b/test/unittest/inner_kits/neural_network_runtime_inner_test.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_INNER_UNITTEST_H +#define NEURAL_NETWORK_RUNTIME_INNER_UNITTEST_H + +#include "interfaces/innerkits/c/neural_network_runtime_inner.h" +#include + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Unittest { +class NeuralNetworkRuntimeInnerTest : public testing::Test { +public: + static void SetUpTestCase(void); + static void TearDownTestCase(void); + void SetUp(); + void TearDown(); +}; +} // namespace Unittest +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_INNER_UNITTEST_H \ No newline at end of file diff --git a/test/unittest/ops/BUILD.gn b/test/unittest/ops/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..4983433292044104c8fcd2baabeb6e8aa27af587 --- /dev/null +++ b/test/unittest/ops/BUILD.gn @@ -0,0 +1,119 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import("//build/test.gni") + +module_output_path = "neural_network_runtime/" + +config("module_private_config") { + visibility = [ ":*" ] + + include_dirs = [ + "//third_party/googletest/googlemock/include", + "//foundation/ai/neural_network_runtime", + "//foundation/communication/ipc/interfaces/innerkits/ipc_core/include", + "//third_party/mindspore/mindspore/lite/mindir/include" + ] +} + +ohos_unittest("OpsUnittest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/ops/add_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/argmax_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/avgpool_pad_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/avgpool_padmod_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/batch_to_space_nd_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/batchnorm_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/biasadd_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/cast_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/concat_three_inputs_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/concat_two_inputs_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/conv2d_pad_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/conv2d_padmode_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/conv2d_tranpose_padmode_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/conv2d_transpose_pad_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/depthwise_conv2d_native_pad_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/depthwise_conv2d_native_padmode_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/div_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/eltwise_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/expandims_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/fullconnection_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/fullconnection_with_axis_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/fill_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/gather_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/gelu_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/hswish_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/layernorm_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/lessequal_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/maximum_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/maxpool_pad_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/maxpool_padmode_test.cpp" ] + sources += [ 
"//foundation/ai/neural_network_runtime/test/unittest/ops/matmul_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/mul_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/onehot_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/pad_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/pow_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/prelu_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/quant_dtype_cast_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/reduce_all_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/reduce_mean_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/reduce_prod_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/relu_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/relu6_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/reshape_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/resize_bilinear_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/rsqrt_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/scale_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/shape_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/sigmoid_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/slice_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/softmax_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/spacetobatchnd_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/split_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/sqrt_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/squared_difference_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/squeeze_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/stack_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/strided_slice_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/sub_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/tanh_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/tile_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/topk_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/transpose_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/unsqueeze_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/ops_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/base_test.cpp" ] + + configs = [ ":module_private_config" ] + + 
deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "hitrace_native:hitrace_meter", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +group("ops_unittest") { + testonly = true + deps = [ + ":OpsUnittest", + ] +} \ No newline at end of file diff --git a/test/unittest/ops/add_test.cpp b/test/unittest/ops/add_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..50fed28e3105b8820adb65acafc15defd1bfa847 --- /dev/null +++ b/test/unittest/ops/add_test.cpp @@ -0,0 +1,277 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/add_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class AddFusionBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SaveParamsTensor(const std::vector& m_param, OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +public: + AddBuilder m_builder; + std::vector m_inputs{0, 1}; + std::vector m_outputs{2}; + std::vector m_param{3}; + std::vector m_input_dim{3, 3}; + std::vector m_output_dim{3, 3}; + std::vector m_param_dim{}; +}; + +void AddFusionBuilderTest::SetUp() {} + +void AddFusionBuilderTest::TearDown() {} + +void AddFusionBuilderTest::SaveParamsTensor(const std::vector& m_param, OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + m_paramsIndex = m_param; + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); +} + +/** + * @tc.name: add_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_build_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_param, OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_build_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_build_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_param, OH_NN_INT8, m_param_dim, nullptr, 
OH_NN_ADD_ACTIVATIONTYPE); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_build_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_build_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_param = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_param, OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_build_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_build_004, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {}; + m_param = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_param, OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_build_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_build_005, TestSize.Level1) +{ + m_inputs = {0, 6}; + m_outputs = {2}; + m_param = {3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_param, OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_build_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_build_006, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {6}; + m_param = {3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_param, OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_build_007 + * @tc.desc: Verify the param invalid type of the build function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_build_007, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + m_paramsIndex = m_param; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + int32_t* activationValueTest = new (std::nothrow) int32_t[0]; + EXPECT_NE(nullptr, activationValueTest); + tensor->SetBuffer(activationValueTest, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_build_008 + * @tc.desc: Verify the param invalid value of the build function + * @tc.type: FUNC + */ 
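/*
 * Context for the invalid-parameter cases that follow: AddBuilder expects a single
 * OH_NN_ADD_ACTIVATIONTYPE parameter tensor carrying one int8 activation value. A
 * sketch of the well-formed setup, mirroring SaveParamsTensor above (the NNTensor
 * template argument is an assumption):
 *
 *     std::shared_ptr<NNTensor> tensor =
 *         TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE);
 *     int8_t* activation = new (std::nothrow) int8_t(0);   // 0 selects no fused activation
 *     tensor->SetBuffer(activation, sizeof(int8_t));
 *     m_allTensors.emplace_back(tensor);
 */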
+HWTEST_F(AddFusionBuilderTest, add_build_008, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + int8_t* activationValueTest = new (std::nothrow) int8_t[40]; + EXPECT_NE(nullptr, activationValueTest); + tensor->SetBuffer(activationValueTest, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + + m_paramsIndex = m_param; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_build_009 + * @tc.desc: Verify the param invalid to add of the build function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_build_009, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + int8_t* activationValueTest = new (std::nothrow) int8_t[0]; + EXPECT_NE(nullptr, activationValueTest); + tensor->SetBuffer(activationValueTest, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + + m_paramsIndex = m_param; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_build_010 + * @tc.desc: Verify the param invalid to add of the build function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_build_010, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + m_allTensors.emplace_back(tensor); + + m_paramsIndex = m_param; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_getprimitive_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_getprimitive_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_param, OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + int8_t activationValueTest = 0; + int8_t returnValue = mindspore::lite::MindIR_AddFusion_GetActivationType(primitive.get()); + EXPECT_EQ(returnValue, activationValueTest); +} + +/** + * @tc.name: add_getprimitive_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_getprimitive_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_param, OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + + LiteGraphTensorPtr primitive = {nullptr, DestroyLiteGraphPrimitive}; + LiteGraphTensorPtr expectPrimitive = m_builder.GetPrimitive(); + + EXPECT_EQ(primitive, 
expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/argmax_test.cpp b/test/unittest/ops/argmax_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5a7ee65fed05f9a2ebcc2c92ec06cd3bb22c789d --- /dev/null +++ b/test/unittest/ops/argmax_test.cpp @@ -0,0 +1,330 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/argmax_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ArgMaxBuilderTest : public OpsTest { +public: + void SetUp(); + void TearDown(); + + void SetArgmaxAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetArgmaxKeepdims(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +public: + ArgMaxBuilder m_builder; + std::vector m_inputs{0}; + std::vector m_outputs{1}; + std::vector m_params{2, 3}; + std::vector m_input_dim{3, 3}; + std::vector m_output_dim{3, 3}; + std::vector m_param_dim{}; +}; + +void ArgMaxBuilderTest::SetUp() {} + +void ArgMaxBuilderTest::TearDown() {} + +void ArgMaxBuilderTest::SetArgmaxAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t(0); + EXPECT_NE(nullptr, axisValue); + tensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(tensor); +} + +void ArgMaxBuilderTest::SetArgmaxKeepdims(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + bool* keepdimsValue = new (std::nothrow) bool(false); + EXPECT_NE(nullptr, keepdimsValue); + tensor->SetBuffer(keepdimsValue, sizeof(keepdimsValue)); + m_allTensors.emplace_back(tensor); +} + +/** + * @tc.name: argmax_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, argmax_build_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} +/** + * @tc.name: argmax_build_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, argmax_build_002, 
TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2, 3}; + + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: argmax_build_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, argmax_build_003, TestSize.Level1) +{ + m_inputs = {}; + m_outputs = {1}; + m_params = {2, 3}; + + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: argmax_build_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, argmax_build_004, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {}; + m_params = {1, 2}; + + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: argmax_build_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, argmax_build_005, TestSize.Level1) +{ + m_inputs = {6}; + m_outputs = {1}; + m_params = {2, 3}; + + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: argmax_build_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, argmax_build_006, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {6}; + m_params = {2, 3}; + + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: argmax_build_007 + * @tc.desc: Verify the invalid axis of the build function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, argmax_build_007, 
TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_FLOAT32, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + float* axisValueTest = new (std::nothrow) float(0); + EXPECT_NE(nullptr, axisValueTest); + + tensor->SetBuffer(axisValueTest, sizeof(float)); + m_allTensors.emplace_back(tensor); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: argmax_build_008 + * @tc.desc: Verify the invalid keepdims of the build function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, argmax_build_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + int64_t* keepdimsValue = new (std::nothrow) int64_t(0); + EXPECT_NE(nullptr, keepdimsValue); + + tensor->SetBuffer(keepdimsValue, sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: argmax_build_009 + * @tc.desc: Verify the invalid param to argmax of the build function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, argmax_build_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + int64_t* strideValue = new (std::nothrow) int64_t(0); + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: argmax_build_010 + * @tc.desc: Verify the argmax without set axis of the build function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, argmax_build_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + m_allTensors.emplace_back(tensor); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: argmax_build_011 + * @tc.desc: Verify the argmax without set keepdims of the build function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, argmax_build_011, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + std::shared_ptr tensor = TransToNNTensor(OH_NN_BOOL, m_param_dim, 
nullptr, OH_NN_ARG_MAX_KEEPDIMS); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_getprimitive_001 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, add_getprimitive_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + EXPECT_NE(nullptr, primitive); + + int64_t returnValue = mindspore::lite::MindIR_ArgMaxFusion_GetAxis(primitive.get()); + EXPECT_EQ(returnValue, 0); + bool keepdimsReturn = mindspore::lite::MindIR_ArgMaxFusion_GetKeepDims(primitive.get()); + EXPECT_EQ(keepdimsReturn, false); +} + +/** + * @tc.name: add_getprimitive_002 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, add_getprimitive_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/avgpool_pad_test.cpp b/test/unittest/ops/avgpool_pad_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..665896aff2450cc2dad6fc4ab82d5cd162f605ea --- /dev/null +++ b/test/unittest/ops/avgpool_pad_test.cpp @@ -0,0 +1,431 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/avgpool_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class AvgPoolPadBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetPad(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetPadParams(); + +public: + AvgPoolBuilder m_builder; + std::vector m_input_dim{1, 3, 3, 1}; + std::vector m_output_dim{1, 2, 2, 1}; + std::vector m_kenelsize_dim{2}; + std::vector m_stride_dim{2}; + std::vector m_pad_dim{4}; + std::vector m_param_dim{}; + std::vector m_inputs{0}; + std::vector m_outputs{1}; + std::vector m_params{2, 3, 4, 5}; +}; + +void AvgPoolPadBuilderTest::SetUp() {} + +void AvgPoolPadBuilderTest::TearDown() {} + +void AvgPoolPadBuilderTest::SetPad(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + int32_t padNum{4}; + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* padValue = new (std::nothrow) int64_t[padNum]{0, 0, 0, 0}; + EXPECT_NE(nullptr, padValue); + tensor->SetBuffer(padValue, sizeof(int64_t) * padNum); + m_allTensors.emplace_back(tensor); +} + +void AvgPoolPadBuilderTest::SetPadParams() +{ + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); +} + +/** + * @tc.name: avgpool_build_pad_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParams(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParams(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_003, TestSize.Level1) +{ + m_inputs = {}; + m_outputs = {0}; + m_params = {1, 2, 3, 4}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParams(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ 
+HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_004, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {}; + m_params = {1, 2, 3, 4}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParams(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_005, TestSize.Level1) +{ + m_inputs = {6}; + m_outputs = {1}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParams(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_006, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {6}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParams(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_007 + * @tc.desc: Verify the invalid kernelSize of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_007, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + int32_t numKernels{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_kenelsize_dim, nullptr, + OH_NN_AVG_POOL_KERNEL_SIZE); + int32_t* kernelSizeValue = new (std::nothrow) int32_t[numKernels]{1, 1}; + EXPECT_NE(nullptr, kernelSizeValue); + tensor->SetBuffer(kernelSizeValue, sizeof(int32_t) * numKernels); + m_allTensors.emplace_back(tensor); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_008 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + + int32_t numStride{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + int32_t* strideValue = new (std::nothrow) int32_t[numStride]{1, 1}; + EXPECT_NE(nullptr, strideValue); + tensor->SetBuffer(strideValue, sizeof(int32_t) * numStride); + m_allTensors.emplace_back(tensor); + + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + SetActivation(OH_NN_INT8, 
m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_009 + * @tc.desc: Verify the invalid pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + int32_t padNum{4}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + int32_t* padValue = new (std::nothrow) int32_t[padNum]{0, 0, 0, 0}; + EXPECT_NE(nullptr, padValue); + + tensor->SetBuffer(padValue, sizeof(int32_t) * padNum); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + + +/** + * @tc.name: avgpool_build_pad_010 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_AVG_POOL_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_011 + * @tc.desc: Verify the activation scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_011, TestSize.Level1) +{ + m_param_dim = {2}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + int8_t* activationValue = new (std::nothrow) int8_t[2]{1, 2}; + EXPECT_NE(nullptr, activationValue); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_AVG_POOL_ACTIVATION_TYPE); + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_012 + * @tc.desc: Verify the avgpool without set kernelsize of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_012, TestSize.Level1) +{ + m_paramsIndex = m_params; + 
SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_kenelsize_dim, nullptr, + OH_NN_AVG_POOL_KERNEL_SIZE); + m_allTensors.emplace_back(tensor); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_013 + * @tc.desc: Verify the avgpool without set stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_013, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + m_allTensors.emplace_back(tensor); + + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_014 + * @tc.desc: Verify the avgpool without set pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_014, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + m_allTensors.emplace_back(tensor); + + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_015 + * @tc.desc: Verify the avgpool without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_015, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_AVG_POOL_ACTIVATION_TYPE); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_getprimitive_pad_001 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_getprimitive_pad_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); 
+ SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParams(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + std::vector<int64_t> expectKernelSize = mindspore::lite::MindIR_AvgPoolFusion_GetKernelSize(primitive.get()); + std::vector<int64_t> kernelSizeValueTest{1, 1}; + EXPECT_EQ(kernelSizeValueTest, expectKernelSize); + std::vector<int64_t> expectStrides = mindspore::lite::MindIR_AvgPoolFusion_GetStrides(primitive.get()); + std::vector<int64_t> strideValueTest{1, 1}; + std::vector<int64_t> expectPadValue = mindspore::lite::MindIR_AvgPoolFusion_GetPad(primitive.get()); + std::vector<int64_t> padValueValueTest{0, 0, 0, 0}; + EXPECT_EQ(padValueValueTest, expectPadValue); + + int8_t activationValue = 0; + int expectActivation = mindspore::lite::MindIR_AvgPoolFusion_GetActivationType(primitive.get()); + EXPECT_EQ(activationValue, expectActivation); +} + +/** + * @tc.name: avgpool_getprimitive_pad_002 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_getprimitive_pad_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParams(); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/avgpool_padmod_test.cpp b/test/unittest/ops/avgpool_padmod_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..61fb147245c4bbbe0350963834b4288246bf873e --- /dev/null +++ b/test/unittest/ops/avgpool_padmod_test.cpp @@ -0,0 +1,420 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/avgpool_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class AvgPoolBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetPadMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetParams(); + +public: + AvgPoolBuilder m_builder; + std::vector m_inputs{0}; + std::vector m_outputs{1}; + std::vector m_params{2, 3, 4, 5}; + std::vector m_input_dim{1, 3, 3, 1}; + std::vector m_output_dim{1, 2, 2, 1}; + std::vector m_kenelsize_dim{2}; + std::vector m_stride_dim{2}; + std::vector m_param_dim{}; +}; + +void AvgPoolBuilderTest::SetUp() {} + +void AvgPoolBuilderTest::TearDown() {} + +void AvgPoolBuilderTest::SetPadMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* padModeValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, padModeValue); + tensor->SetBuffer(padModeValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); +} + +void AvgPoolBuilderTest::SetParams() +{ + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); +} + +/** + * @tc.name: avgpool_build_pad_mode_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParams(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParams(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_003, TestSize.Level1) +{ + m_inputs = {}; + m_outputs = {0}; + m_params = {1, 2, 3, 4}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParams(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_004, 
TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {}; + m_params = {1, 2, 3, 4}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParams(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_005, TestSize.Level1) +{ + m_inputs = {6}; + m_outputs = {1}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParams(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_006, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {6}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParams(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_007 + * @tc.desc: Verify the invalid kernelSize of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_007, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + int32_t kernelsNum{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_kenelsize_dim, nullptr, + OH_NN_AVG_POOL_KERNEL_SIZE); + int32_t* kernelSizeValue = new (std::nothrow) int32_t[kernelsNum]{1, 1}; + EXPECT_NE(nullptr, kernelSizeValue); + + tensor->SetBuffer(kernelSizeValue, sizeof(int32_t) * kernelsNum); + m_allTensors.emplace_back(tensor); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_008 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + + int32_t strideNum{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + int32_t* strideValue = new (std::nothrow) int32_t[strideNum]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, sizeof(int32_t) * strideNum); + m_allTensors.emplace_back(tensor); + + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, 
nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_009 + * @tc.desc: Verify the invalid padmode of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + + int32_t *padValueTest = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, padValueTest); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + tensor->SetBuffer(padValueTest, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + + +/** + * @tc.name: avgpool_build_pad_mode_010 + * @tc.desc: Verify the invalid activation type of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_AVG_POOL_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_011 + * @tc.desc: Verify the scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_011, TestSize.Level1) +{ + m_param_dim = {2}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + int8_t* activationValue = new (std::nothrow) int8_t[2]{1, 2}; + EXPECT_NE(nullptr, activationValue); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_AVG_POOL_ACTIVATION_TYPE); + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_012 + * @tc.desc: Verify the param invalid to avgpool of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_012, TestSize.Level1) +{ + m_paramsIndex = m_params; + 
SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_DIV_ACTIVATIONTYPE); + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_013 + * @tc.desc: Verify the invalid padmode of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_013, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + int8_t *padValueTest = new (std::nothrow) int8_t(6); + EXPECT_NE(nullptr, padValueTest); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + tensor->SetBuffer(padValueTest, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_014 + * @tc.desc: Verify the invalid activation value of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_014, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + + int8_t* activationValue = new (std::nothrow) int8_t(6); + EXPECT_NE(nullptr, activationValue); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_AVG_POOL_ACTIVATION_TYPE); + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_getprimitive_pad_mode_001 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_getprimitive_pad_mode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParams(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr 
expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + std::vector<int64_t> returnKernelSize = mindspore::lite::MindIR_AvgPoolFusion_GetKernelSize(primitive.get()); + std::vector<int64_t> kernelSizeValueTest{1, 1}; + EXPECT_EQ(kernelSizeValueTest, returnKernelSize); + + std::vector<int64_t> returnStrides = mindspore::lite::MindIR_AvgPoolFusion_GetStrides(primitive.get()); + std::vector<int64_t> strideValueTest{1, 1}; + int returnPadMode = mindspore::lite::MindIR_AvgPoolFusion_GetPadMode(primitive.get()); + EXPECT_EQ(1, returnPadMode); + int returnActivation = mindspore::lite::MindIR_AvgPoolFusion_GetActivationType(primitive.get()); + EXPECT_EQ(0, returnActivation); +} + +/** + * @tc.name: avgpool_getprimitive_pad_mode_002 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_getprimitive_pad_mode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParams(); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/batch_to_space_nd_test.cpp b/test/unittest/ops/batch_to_space_nd_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..df5897e72a2f93245697dd1da06bdadba1dc4b1f --- /dev/null +++ b/test/unittest/ops/batch_to_space_nd_test.cpp @@ -0,0 +1,338 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/batch_to_space_nd_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class BatchToSpaceNDBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetBlockSize(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetCrops(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +public: + BatchToSpaceNDBuilder m_builder; + std::vector m_inputs{0}; + std::vector m_outputs{1}; + std::vector m_params{2, 3}; + std::vector m_input_dim{4, 1, 1, 1}; + std::vector m_output_dim{1, 2, 2, 1}; + std::vector m_block_dim{2}; + std::vector m_crops_dim{2, 2}; +}; + +void BatchToSpaceNDBuilderTest::SetUp() {} + +void BatchToSpaceNDBuilderTest::TearDown() {} + +void BatchToSpaceNDBuilderTest::SetBlockSize(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int32_t blockNum = 2; + int64_t* blockSizeValue = new (std::nothrow) int64_t[2]{2, 2}; + EXPECT_NE(nullptr, blockSizeValue); + tensor->SetBuffer(blockSizeValue, sizeof(int64_t) * blockNum); + m_allTensors.emplace_back(tensor); +} + +void BatchToSpaceNDBuilderTest::SetCrops(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + int32_t cropsNum = 4; + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* cropsValue = new (std::nothrow) int64_t[4]{0, 0, 0, 0}; + EXPECT_NE(nullptr, cropsValue); + tensor->SetBuffer(cropsValue, sizeof(int64_t) * cropsNum); + m_allTensors.emplace_back(tensor); +} + +/** + * @tc.name: batch_to_space_nd_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, 
batch_to_space_nd_build_003, TestSize.Level1) +{ + m_params = {1, 2}; + m_paramsIndex = m_params; + m_inputs = {}; + m_outputs = {0}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_004, TestSize.Level1) +{ + m_inputs = {}; + m_outputs = {0}; + m_params = {1, 2}; + + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_005, TestSize.Level1) +{ + m_inputs = {6}; + m_outputs = {1}; + m_params = {2, 3}; + + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_006, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {6}; + m_params = {2, 3}; + + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_007 + * @tc.desc: Verify the invalid crops of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_007, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_crops_dim, nullptr, + OH_NN_BATCH_TO_SPACE_ND_CROPS); + int32_t cropsNum = 4; + int32_t* cropsValue = new (std::nothrow) int32_t[4]{0, 0, 0, 0}; + EXPECT_NE(nullptr, cropsValue); + + tensor->SetBuffer(cropsValue, sizeof(int32_t) * cropsNum); + 
m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_008 + * @tc.desc: Verify the invalid blocksize of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_block_dim, nullptr, + OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + int32_t blockNum = 2; + int32_t* blockSizeValue = new (std::nothrow) int32_t[2]{2, 2}; + EXPECT_NE(nullptr, blockSizeValue); + tensor->SetBuffer(blockSizeValue, sizeof(int32_t) * blockNum); + m_allTensors.emplace_back(tensor); + + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_009 + * @tc.desc: Verify the invalid param to batchtospace of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_block_dim, nullptr, + OH_NN_CONV2D_STRIDES); + int64_t blockNum = 2; + int64_t* blockSizeValue = new (std::nothrow) int64_t[2]{2, 2}; + EXPECT_NE(nullptr, blockSizeValue); + tensor->SetBuffer(blockSizeValue, sizeof(int64_t) * blockNum); + + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_010 + * @tc.desc: Verify the batchtospacend without set blocksize of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_block_dim, nullptr, + OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + m_allTensors.emplace_back(tensor); + + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_011 + * @tc.desc: Verify the batchtospacend without set crops of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_011, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_crops_dim, nullptr, + OH_NN_BATCH_TO_SPACE_ND_CROPS); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_getprimitive_001 + * @tc.desc: Verify the success of the 
GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_getprimitive_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + std::vector<int64_t> blockSizeValue{2, 2}; + std::vector<std::vector<int64_t>> cropsValue{{0, 0}, {0, 0}}; + std::vector<int64_t> returnValue = mindspore::lite::MindIR_BatchToSpaceND_GetBlockShape(primitive.get()); + EXPECT_EQ(returnValue, blockSizeValue); + std::vector<std::vector<int64_t>> cropsReturn = mindspore::lite::MindIR_BatchToSpaceND_GetCrops(primitive.get()); + EXPECT_EQ(cropsReturn, cropsValue); +} + +/** + * @tc.name: batch_to_space_nd_getprimitive_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_getprimitive_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/batchnorm_builder_test.cpp b/test/unittest/ops/batchnorm_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a8eb80052e54b9bb728cb0f51f5fc95eeb4aaf5c --- /dev/null +++ b/test/unittest/ops/batchnorm_builder_test.cpp @@ -0,0 +1,261 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/batchnorm_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class BatchNormBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveParamsTensor(OH_NN_DataType dataType, + const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + BatchNormBuilder m_batchNorm; + std::vector<uint32_t> m_inputs {0, 1, 2, 3, 4}; + std::vector<uint32_t> m_outputs {5}; + std::vector<uint32_t> m_params {6}; + std::vector<int32_t> m_inputDim {2, 2}; + std::vector<int32_t> m_outputDim {2, 2}; + std::vector<int32_t> m_paramDim {}; +}; + +void BatchNormBuilderTest::SetUp() {} + +void BatchNormBuilderTest::TearDown() {} + +void BatchNormBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, + const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr<NNTensor> epsilonTensor = TransToNNTensor(dataType, dim, quantParam, type); + float *epsilonValue = new (std::nothrow) float(0.0f); + EXPECT_NE(nullptr, epsilonValue); + epsilonTensor->SetBuffer(epsilonValue, sizeof(float)); + m_allTensors.emplace_back(epsilonTensor); +} + +/** + * @tc.name: batchnorm_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_BATCH_NORM_EPSILON); + + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: batchnorm_build_002 + * @tc.desc: Verify that the build function returns a failed message when the operator has already been built (m_isBuild is true). + * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_BATCH_NORM_EPSILON); + + EXPECT_EQ(OH_NN_SUCCESS, m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: batchnorm_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalid input. + * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2, 3, 4, 5}; + m_outputs = {6}; + m_params = {7}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_BATCH_NORM_EPSILON); + + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: batchnorm_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalid output. 
+ * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_004, TestSize.Level0) +{ + m_outputs = {5, 6}; + m_params = {7}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_BATCH_NORM_EPSILON); + + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: batchnorm_build_005 + * @tc.desc: Verify that the build function returns a failed message with null allTensor. + * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: batchnorm_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: batchnorm_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid epsilon's dataType. + * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr epsilonTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, + nullptr, OH_NN_BATCH_NORM_EPSILON); + float epsilonValue = 0.0f; + epsilonTensor->SetBuffer(&epsilonValue, sizeof(epsilonValue)); + m_allTensors.emplace_back(epsilonTensor); + + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + epsilonTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: batchnorm_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid epsilon's dimension. + * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_008, TestSize.Level0) +{ + std::vector m_paramDim = { 2 }; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr epsilonTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_BATCH_NORM_EPSILON); + float epsilonValue[2] = {0.0f, 0.0f}; + epsilonTensor->SetBuffer(epsilonValue, 2 * sizeof(float)); + m_allTensors.emplace_back(epsilonTensor); + + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + epsilonTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: batchnorm_build_009 + * @tc.desc: Verify that the build function returns a failed message with invalid param. 
+ * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: batchnorm_build_010 + * @tc.desc: Verify that the build function returns a failed message without set buffer successfully. + * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_010, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr epsilonTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, + OH_NN_BATCH_NORM_EPSILON); + m_allTensors.emplace_back(epsilonTensor); + + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: batchnorm_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_BATCH_NORM_EPSILON); + + float epsilonValue = 0.9; + EXPECT_EQ(OH_NN_SUCCESS, m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_batchNorm.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_FusedBatchNorm_GetEpsilon(primitive.get()); + EXPECT_EQ(returnValue, epsilonValue); +} + +/** + * @tc.name: batchnorm_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_getprimitive_002, TestSize.Level0) +{ + BatchNormBuilder batchNorm; + LiteGraphPrimitvePtr primitive = m_batchNorm.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/biasadd_test.cpp b/test/unittest/ops/biasadd_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0c848c396cb3524d9fed38eccc74af0e3fcf752f --- /dev/null +++ b/test/unittest/ops/biasadd_test.cpp @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/bias_add_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class BiasAddBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetBiasAddToallTensors(); +public: + BiasAddBuilder m_builder; + std::vector m_inputs{0, 1}; + std::vector m_outputs{2}; + std::vector m_params{}; + std::vector m_output_dim{2, 3}; +}; + +void BiasAddBuilderTest::SetUp() {} + +void BiasAddBuilderTest::TearDown() {} + +void BiasAddBuilderTest::SetBiasAddToallTensors() +{ + std::vector m_input_dim{2, 3}; + std::vector biasDim{3}; + std::shared_ptr inputTensor; + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + inputTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); +} + +/** + * @tc.name: biasadd_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_build_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetBiasAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: biasadd_build_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_build_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetBiasAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: biasadd_build_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_build_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetBiasAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: biasadd_build_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_build_004, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {}; + m_params = {}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetBiasAddToallTensors(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: biasadd_build_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_build_005, TestSize.Level1) +{ + m_inputs = {0, 6}; + m_outputs = {2}; + m_params = {}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetBiasAddToallTensors(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + 
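// Input index 6 does not correspond to any tensor saved in m_allTensors, so Build() should report OH_NN_INVALID_PARAMETER. + 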
EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: biasadd_build_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_build_006, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {6}; + m_params = {}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetBiasAddToallTensors(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: biasadd_build_007 + * @tc.desc: Verify the paramIndex not empty of the build function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_build_007, TestSize.Level1) +{ + m_params = {1}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetBiasAddToallTensors(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: biasadd_getprimitive_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_getprimitive_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetBiasAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: biasadd_getprimitive_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_getprimitive_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetBiasAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/cast_test.cpp b/test/unittest/ops/cast_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fb52b4bcc298e62c102311741efd9a4946371757 --- /dev/null +++ b/test/unittest/ops/cast_test.cpp @@ -0,0 +1,260 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/cast_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class CastBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetCastAddToallTensors(); + +public: + CastBuilder m_builder; + std::vector m_inputs{0, 1}; + std::vector m_outputs{2}; + std::vector m_params{}; + std::vector m_output_dim{1, 2, 2, 1}; +}; + +void CastBuilderTest::SetUp() {} + +void CastBuilderTest::TearDown() {} + +void CastBuilderTest::SetCastAddToallTensors() +{ + std::vector m_input_dim{1, 2, 2, 1}; + std::vector typeDim = {}; + std::shared_ptr inputTensor; + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + + inputTensor = TransToNNTensor(OH_NN_INT32, typeDim, nullptr, OH_NN_TENSOR); + int32_t* typeValue = new (std::nothrow) int32_t(4); + EXPECT_NE(nullptr, typeValue); + inputTensor->SetBuffer(typeValue, sizeof(int32_t)); + m_allTensors.emplace_back(inputTensor); +} + +/** + * @tc.name: cast_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(CastBuilderTest, cast_build_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetCastAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: cast_build_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(CastBuilderTest, cast_build_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetCastAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: cast_build_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(CastBuilderTest, cast_build_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetCastAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: cast_build_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(CastBuilderTest, cast_build_004, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetCastAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: cast_build_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(CastBuilderTest, cast_build_005, TestSize.Level1) +{ + m_inputs = {0, 6}; + m_outputs = {2}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetCastAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, 
m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: cast_build_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(CastBuilderTest, cast_build_006, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {6}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetCastAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: cast_build_007 + * @tc.desc: Verify the paramIndex not empty of the build function + * @tc.type: FUNC + */ +HWTEST_F(CastBuilderTest, cast_build_007, TestSize.Level1) +{ + m_params = {1}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetCastAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: cast_build_008 + * @tc.desc: Verify the invalid cast type value of the build function + * @tc.type: FUNC + */ +HWTEST_F(CastBuilderTest, cast_build_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + std::vector<int32_t> m_input_dim{1, 2, 2, 1}; + std::vector<int32_t> typeDim = {}; + std::shared_ptr<NNTensor> inputTensor; + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + + inputTensor = TransToNNTensor(OH_NN_INT32, typeDim, nullptr, OH_NN_TENSOR); + int32_t* typeValue = new (std::nothrow) int32_t(40); + EXPECT_NE(nullptr, typeValue); + + inputTensor->SetBuffer(typeValue, sizeof(int32_t)); + m_allTensors.emplace_back(inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: cast_build_009 + * @tc.desc: Verify the cast without set types of the build function + * @tc.type: FUNC + */ +HWTEST_F(CastBuilderTest, cast_build_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + std::vector<int32_t> m_input_dim{1, 2, 2, 1}; + std::vector<int32_t> typeDim = {}; + std::shared_ptr<NNTensor> inputTensor; + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + + inputTensor = TransToNNTensor(OH_NN_INT32, typeDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: cast_getprimitive_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(CastBuilderTest, cast_getprimitive_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetCastAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: cast_getprimitive_002 + * @tc.desc: Verify the 
behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(CastBuilderTest, cast_getprimitive_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetCastAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/concat_three_inputs_test.cpp b/test/unittest/ops/concat_three_inputs_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..093b1c5b83f7ec722a4ec43b322a052587fcc845 --- /dev/null +++ b/test/unittest/ops/concat_three_inputs_test.cpp @@ -0,0 +1,244 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/concat_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ConcatBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +public: + ConcatBuilder m_builder; + std::vector m_inputs{0, 1, 2}; + std::vector m_outputs{3}; + std::vector m_params{4}; + std::vector m_input_dim{3, 3}; + std::vector m_output_dim{3, 3}; + std::vector m_param_dim{}; +}; + +void ConcatBuilderTest::SetUp() {} + +void ConcatBuilderTest::TearDown() {} + +void ConcatBuilderTest::SetAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t(0); + EXPECT_NE(nullptr, axisValue); + tensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(tensor); +} + +/** + * @tc.name: concat_build_three_input_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_build_three_input_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_three_input_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_build_three_input_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + 
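// Building the same ConcatBuilder twice is not allowed; the second Build() call below should return OH_NN_OPERATION_FORBIDDEN. + 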
SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_three_input_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_build_three_input_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_three_input_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_build_three_input_004, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {}; + m_params = {3}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_three_input_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_build_three_input_005, TestSize.Level1) +{ + m_outputs = {3}; + m_params = {4}; + m_inputs = {0, 1, 6}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_three_input_007 + * @tc.desc: Verify the invalid axis of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_build_three_input_007, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + m_paramsIndex = m_params; + int32_t* axisValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, axisValue); + + tensor->SetBuffer(axisValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_three_input_008 + * @tc.desc: Verify the scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_build_three_input_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + m_param_dim = {2}; + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + int64_t* axisValue = new (std::nothrow) 
int64_t[2]{0, 0}; + EXPECT_NE(nullptr, axisValue); + + tensor->SetBuffer(axisValue, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_three_input_009 + * @tc.desc: Verify the invalid param to concat of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_build_three_input_009, TestSize.Level1) +{ + m_param_dim = {2}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_STRIDES); + int64_t* axisValue = new (std::nothrow) int64_t[2]{0, 0}; + EXPECT_NE(nullptr, axisValue); + + tensor->SetBuffer(axisValue, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_getprimitive_three_input_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_getprimitive_three_input_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + int64_t expectValue = mindspore::lite::MindIR_Concat_GetAxis(primitive.get()); + EXPECT_EQ(expectValue, 0); +} + +/** + * @tc.name: concat_getprimitive_three_input_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_getprimitive_three_input_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr returnPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(returnPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/concat_two_inputs_test.cpp b/test/unittest/ops/concat_two_inputs_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8fa81b05f3f98b14e891e45e2d7067975fb97bb2 --- /dev/null +++ b/test/unittest/ops/concat_two_inputs_test.cpp @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/concat_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ConcatTwoInputBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +public: + ConcatBuilder m_builder; + std::vector m_inputs{0, 1}; + std::vector m_outputs{2}; + std::vector m_params{3}; + std::vector m_input_dim{3, 3}; + std::vector m_output_dim{3, 3}; + std::vector m_param_dim{}; +}; + +void ConcatTwoInputBuilderTest::SetUp() {} + +void ConcatTwoInputBuilderTest::TearDown() {} + +void ConcatTwoInputBuilderTest::SetAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t(0); + EXPECT_NE(nullptr, axisValue); + + tensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(tensor); +} + +/** + * @tc.name: concat_build_two_input_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_two_input_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_two_input_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {2}; + m_params = {3}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_two_input_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_004, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {}; + m_params = {3}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, 
nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_two_input_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_005, TestSize.Level1) +{ + m_inputs = {0, 1, 6}; + m_outputs = {3}; + m_params = {4}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_two_input_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_006, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {6}; + m_params = {3}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_two_input_007 + * @tc.desc: Verify the invalid axis of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_007, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + int32_t* axisValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, axisValue); + + tensor->SetBuffer(axisValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_two_input_008 + * @tc.desc: Verify the scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_008, TestSize.Level1) +{ + m_param_dim = {2}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + int64_t* axisValue = new (std::nothrow) int64_t[2]{0, 0}; + EXPECT_NE(nullptr, axisValue); + + tensor->SetBuffer(axisValue, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_two_input_009 + * @tc.desc: This is OH_NN_INVALID_PARAMETER case, course the value of axis is nullptr. 
+ * @tc.type: FUNC + */ +HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + m_allTensors.emplace_back(tensor); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_getprimitive_two_input_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(ConcatTwoInputBuilderTest, concat_getprimitive_two_input_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + int64_t returnValue = mindspore::lite::MindIR_Concat_GetAxis(primitive.get()); + EXPECT_EQ(returnValue, 0); +} + +/** + * @tc.name: concat_getprimitive_two_input_001 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(ConcatTwoInputBuilderTest, concat_getprimitive_two_input_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/conv2d_pad_test.cpp b/test/unittest/ops/conv2d_pad_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c19c40282a84c788e7aa2a559c1e54aea2a57338 --- /dev/null +++ b/test/unittest/ops/conv2d_pad_test.cpp @@ -0,0 +1,561 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/conv2d_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class Conv2DBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetConv2dInput(); + void SetPad(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetPadParam(); + +public: + Conv2DBuilder m_builder; + std::vector m_inputs{0, 1, 2}; + std::vector m_outputs{3}; + std::vector m_params{4, 5, 6, 7, 8}; + std::vector m_output_dim{1, 3, 3, 1}; + std::vector m_stride_dim{2}; + std::vector m_dilation_dim{2}; + std::vector m_pad_dim{4}; + std::vector m_param_dim{}; +}; + +void Conv2DBuilderTest::SetUp() {} + +void Conv2DBuilderTest::TearDown() {} + +void Conv2DBuilderTest::SetPad(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + int32_t padNum = 4; + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* padValue = new (std::nothrow) int64_t[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, padValue); + tensor->SetBuffer(padValue, padNum * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); +} + +void Conv2DBuilderTest::SetPadParam() +{ + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); +} + +void Conv2DBuilderTest::SetConv2dInput() +{ + int32_t weightNum = 4; + std::vector m_input_dim{1, 4, 4, 1}; + std::vector weightDim = {1, 2, 2, 1}; + std::vector biasDim = {1}; + std::shared_ptr inputsTensor; + inputsTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputsTensor); + + inputsTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR); + float* weightValue = new (std::nothrow) float[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, weightValue); + + inputsTensor->SetBuffer(weightValue, weightNum * sizeof(weightValue)); + m_allTensors.emplace_back(inputsTensor); + inputsTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* biasValue = new (std::nothrow) float[1]{0}; + EXPECT_NE(nullptr, biasValue); + + inputsTensor->SetBuffer(biasValue, sizeof(float)); + m_allTensors.emplace_back(inputsTensor); +} + +/** + * @tc.name: conv2d_build_pad_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + EXPECT_EQ(OH_NN_SUCCESS, 
m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2, 3, 4, 5, 6}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_004, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {}; + m_params = {3, 4, 5, 6, 7}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} +/** + * @tc.name: conv2d_build_pad_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_005, TestSize.Level1) +{ + m_inputs = {0, 1, 9}; + m_outputs = {3}; + m_params = {4, 5, 6, 7, 8}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_006, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {9}; + m_params = {4, 5, 6, 7, 8}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_007 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_007, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + int32_t* strideValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_008 + * @tc.desc: Verify the invalid dilation of 
the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + int32_t* dilationValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, dilationValue); + + tensor->SetBuffer(dilationValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_009 + * @tc.desc: Verify the invalid pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetConv2dInput(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + + int32_t* padValue = new (std::nothrow) int32_t[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, padValue); + tensor->SetBuffer(padValue, 4 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_010 + * @tc.desc: Verify the invalid group of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + + int32_t* groupValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, groupValue); + tensor->SetBuffer(groupValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + + +/** + * @tc.name: conv2d_build_pad_011 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_011, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + + SetPad(OH_NN_INT64, 
m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_012 + * @tc.desc: Verify the group scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_012, TestSize.Level1) +{ + std::vector groupDim = {2}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, groupDim, nullptr, OH_NN_CONV2D_GROUP); + int64_t* groupValue = new (std::nothrow) int64_t[2]{0, 0}; + EXPECT_NE(nullptr, groupValue); + + tensor->SetBuffer(groupValue, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_013 + * @tc.desc: Verify the scalar activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_013, TestSize.Level1) +{ + std::vector activationDim = {2}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, activationDim, nullptr, + OH_NN_CONV2D_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationValue); + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_014 + * @tc.desc: Verify the conv2d without set stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_014, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetConv2dInput(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_stride_dim, nullptr, + OH_NN_CONV2D_STRIDES); + m_allTensors.emplace_back(tensor); + + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, 
m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_015 + * @tc.desc: Verify the conv2d without set dilation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_015, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_dilation_dim, nullptr, + OH_NN_CONV2D_DILATION); + m_allTensors.emplace_back(tensor); + + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_016 + * @tc.desc: Verify the conv2d without set pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_016, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, + OH_NN_CONV2D_PAD); + m_allTensors.emplace_back(tensor); + + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_017 + * @tc.desc: Verify the conv2d without set group of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_017, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, + OH_NN_CONV2D_GROUP); + m_allTensors.emplace_back(tensor); + + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_018 + * @tc.desc: Verify the conv2d without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_018, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_CONV2D_ACTIVATION_TYPE); + m_allTensors.emplace_back(tensor); + + 
EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: conv2d_getprimitive_pad_001
+ * @tc.desc: Verify the nullptr return of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(Conv2DBuilderTest, conv2d_getprimitive_pad_001, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+
+    SetConv2dInput();
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    SetPadParam();
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive);
+    EXPECT_NE(expectPrimitive, primitive);
+
+    std::vector<int64_t> returnStrides = mindspore::lite::MindIR_Conv2DFusion_GetStride(primitive.get());
+    std::vector<int64_t> strideValueTest{1, 1};
+    EXPECT_EQ(strideValueTest, returnStrides);
+
+    std::vector<int64_t> returnDliation = mindspore::lite::MindIR_Conv2DFusion_GetDilation(primitive.get());
+    std::vector<int64_t> dilationValueTest{1, 1};
+    EXPECT_EQ(dilationValueTest, returnDliation);
+
+    std::vector<int64_t> returnPaddings = mindspore::lite::MindIR_Conv2DFusion_GetPadList(primitive.get());
+    std::vector<int64_t> padValueTest{1, 1, 1, 1};
+    EXPECT_EQ(padValueTest, returnPaddings);
+
+    int returnGroup = mindspore::lite::MindIR_Conv2DFusion_GetGroup(primitive.get());
+    EXPECT_EQ(0, returnGroup);
+    int returnActivation = mindspore::lite::MindIR_Conv2DFusion_GetActivationType(primitive.get());
+    EXPECT_EQ(0, returnActivation);
+}
+
+/**
+ * @tc.name: conv2d_getprimitive_pad_002
+ * @tc.desc: Verify the behavior of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(Conv2DBuilderTest, conv2d_getprimitive_pad_002, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+
+    SetConv2dInput();
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    SetPadParam();
+
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive);
+    EXPECT_EQ(expectPrimitive, primitive);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/test/unittest/ops/conv2d_padmode_test.cpp b/test/unittest/ops/conv2d_padmode_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4250d443a47881b6eaea457c3272f095bfc66ca1
--- /dev/null
+++ b/test/unittest/ops/conv2d_padmode_test.cpp
@@ -0,0 +1,591 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "frameworks/native/ops/conv2d_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class Conv2DBuilderPadmodeTest : public OpsTest {
+public:
+    void SetUp() override;
+    void TearDown() override;
+
+    void SetConv2dInput();
+    void SetPadMode(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+    void SetParam();
+
+public:
+    Conv2DBuilder m_builder;
+    std::vector<uint32_t> m_inputs{0, 1, 2};
+    std::vector<uint32_t> m_outputs{3};
+    std::vector<uint32_t> m_params{4, 5, 6, 7, 8};
+    std::vector<int32_t> m_output_dim{1, 3, 3, 1};
+    std::vector<int32_t> m_stride_dim{2};
+    std::vector<int32_t> m_dilation_dim{2};
+    std::vector<int32_t> m_param_dim{};
+};
+
+void Conv2DBuilderPadmodeTest::SetUp() {}
+
+void Conv2DBuilderPadmodeTest::TearDown() {}
+
+void Conv2DBuilderPadmodeTest::SetPadMode(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int8_t* padModeValue = new (std::nothrow) int8_t(0);
+    EXPECT_NE(nullptr, padModeValue);
+
+    tensor->SetBuffer(padModeValue, sizeof(int8_t));
+    m_allTensors.emplace_back(tensor);
+}
+
+void Conv2DBuilderPadmodeTest::SetParam()
+{
+    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES);
+    SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION);
+    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE);
+    SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP);
+    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE);
+}
+
+void Conv2DBuilderPadmodeTest::SetConv2dInput()
+{
+    int32_t weightNum = 4;
+    std::vector<int32_t> m_input_dim{1, 4, 4, 1};
+    std::vector<int32_t> weightDim = {1, 2, 2, 1};
+    std::vector<int32_t> biasDim = {1};
+
+    std::shared_ptr<NNTensor> inputTensor;
+    inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR);
+    m_allTensors.emplace_back(inputTensor);
+
+    inputTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR);
+    float* weightValue = new (std::nothrow) float[4]{1, 1, 1, 1};
+    EXPECT_NE(nullptr, weightValue);
+    inputTensor->SetBuffer(weightValue, weightNum * sizeof(weightValue));
+    m_allTensors.emplace_back(inputTensor);
+
+    inputTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR);
+    float* biasValue = new (std::nothrow) float[1]{0};
+    EXPECT_NE(nullptr, biasValue);
+
+    inputTensor->SetBuffer(biasValue, sizeof(float));
+    m_allTensors.emplace_back(inputTensor);
+}
+
+
+/**
+ * @tc.name: conv2d_build_padmode_001
+ * @tc.desc: Verify the success of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_001, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+
+    SetConv2dInput();
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    SetParam();
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: conv2d_build_padmode_002
+ * @tc.desc: Verify the forbidden of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_002, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+
+    SetConv2dInput();
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    SetParam();
+
+    EXPECT_EQ(OH_NN_SUCCESS,
m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2, 3, 4, 5, 6}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_004, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {}; + m_params = {3, 4, 5, 6, 7}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} +/** + * @tc.name: conv2d_build_padmode_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_005, TestSize.Level1) +{ + m_inputs = {0, 1, 9}; + m_outputs = {3}; + m_params = {4, 5, 6, 7, 8}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_006, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {9}; + m_params = {4, 5, 6, 7, 8}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_007 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_007, TestSize.Level1) +{ + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + int32_t* strideValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * 
@tc.name: conv2d_build_padmode_008 + * @tc.desc: Verify the invalid dilation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_008, TestSize.Level1) +{ + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + int32_t* dilationValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, dilationValue); + + tensor->SetBuffer(dilationValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_009 + * @tc.desc: Verify the invalid padMode of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + int32_t* padModeValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, padModeValue); + tensor->SetBuffer(padModeValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_010 + * @tc.desc: Verify the invalid group of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + int32_t* groupValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, groupValue); + + tensor->SetBuffer(groupValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + + +/** + * @tc.name: conv2d_build_padmode_011 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_011, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); 
+ SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + tensor->SetBuffer(activationValue, sizeof(int32_t)); + + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_012 + * @tc.desc: Verify the group scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_012, TestSize.Level1) +{ + std::vector groupDim = {2}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, groupDim, nullptr, OH_NN_CONV2D_GROUP); + int64_t* groupValue = new (std::nothrow) int64_t[2]{0, 0}; + EXPECT_NE(nullptr, groupValue); + tensor->SetBuffer(groupValue, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_013 + * @tc.desc: Verify the activation scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_013, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + + std::vector activationDim = {2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, activationDim, nullptr, + OH_NN_CONV2D_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_014 + * @tc.desc: Verify the param invalid to conv2d of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_014, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + std::vector activationDim = {2}; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + 
SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, activationDim, nullptr, + OH_NN_DIV_ACTIVATIONTYPE); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_015 + * @tc.desc: Verify the pad value of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_015, TestSize.Level1) +{ + std::vector activationDim = {2}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + int8_t* padModeValue = new (std::nothrow) int8_t(10); + EXPECT_NE(nullptr, padModeValue); + + tensor->SetBuffer(padModeValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_016 + * @tc.desc: Verify the activation scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_016, TestSize.Level1) +{ + std::vector activationDim = {2}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + int32_t weightNum = 3; + std::vector m_input_dim{1, 4, 4, 1}; + std::vector weightDim = {1, 3, 1}; + std::vector biasDim = {1}; + + std::shared_ptr inputTensor; + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + inputTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR); + float* weightValue = new (std::nothrow) float[3]{1, 1, 1}; + EXPECT_NE(nullptr, weightValue); + + inputTensor->SetBuffer(weightValue, weightNum * sizeof(weightValue)); + m_allTensors.emplace_back(inputTensor); + inputTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* biasValue = new (std::nothrow) float[1]{0}; + EXPECT_NE(nullptr, biasValue); + + inputTensor->SetBuffer(biasValue, sizeof(float)); + m_allTensors.emplace_back(inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_017 + * @tc.desc: Verify the activation value of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_017, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, 
OH_NN_CONV2D_GROUP); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_CONV2D_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t(10); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_018 + * @tc.desc: Verify the activation value of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_018, TestSize.Level1) +{ + std::vector m_pad_dim = {3}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + + int32_t padNum = 3; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + int64_t* padValue = new (std::nothrow) int64_t[3]{1, 1, 1}; + EXPECT_NE(nullptr, padValue); + + tensor->SetBuffer(padValue, padNum * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_getprimitive_padmode_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_getprimitive_padmode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + std::vector expectStrides = mindspore::lite::MindIR_Conv2DFusion_GetStride(primitive.get()); + std::vector strideValueTest{1, 1}; + EXPECT_EQ(strideValueTest, expectStrides); + + std::vector expectDliation = mindspore::lite::MindIR_Conv2DFusion_GetDilation(primitive.get()); + std::vector dilationValueTest{1, 1}; + EXPECT_EQ(dilationValueTest, expectDliation); + int expectpadMode = mindspore::lite::MindIR_Conv2DFusion_GetPadMode(primitive.get()); + EXPECT_EQ(1, expectpadMode); + + int expectGroup = mindspore::lite::MindIR_Conv2DFusion_GetGroup(primitive.get()); + EXPECT_EQ(0, expectGroup); + + int expectActivation = mindspore::lite::MindIR_Conv2DFusion_GetActivationType(primitive.get()); + EXPECT_EQ(0, expectActivation); +} + +/** + * @tc.name: conv2d_getprimitive_padmode_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_getprimitive_padmode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git 
a/test/unittest/ops/conv2d_tranpose_padmode_test.cpp b/test/unittest/ops/conv2d_tranpose_padmode_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..eea61e9c222858ea8defdff95d5586d3affa438c
--- /dev/null
+++ b/test/unittest/ops/conv2d_tranpose_padmode_test.cpp
@@ -0,0 +1,790 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "frameworks/native/ops/conv2d_transpose_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class Conv2DTransposePadmodeBuilderTest : public OpsTest {
+public:
+    void SetUp() override;
+    void TearDown() override;
+
+    void SetConv2dTransposeInput();
+    void SetPadMode(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+    void SetOutPaddings(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+    void SetParam();
+
+public:
+    Conv2DTransposeBuilder m_builder;
+    std::vector<uint32_t> m_inputs{0, 1, 2};
+    std::vector<uint32_t> m_outputs{3};
+    std::vector<uint32_t> m_params{4, 5, 6, 7, 8, 9};
+    std::vector<int32_t> m_output_dim{1, 3, 3, 1};
+    std::vector<int32_t> m_stride_dim{2};
+    std::vector<int32_t> m_dilation_dim{2};
+    std::vector<int32_t> outPaddingsDim{2};
+    std::vector<int32_t> m_param_dim{};
+};
+
+void Conv2DTransposePadmodeBuilderTest::SetUp() {}
+
+void Conv2DTransposePadmodeBuilderTest::TearDown() {}
+
+void Conv2DTransposePadmodeBuilderTest::SetPadMode(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int8_t* padModeValue = new (std::nothrow) int8_t(0);
+    EXPECT_NE(nullptr, padModeValue);
+    tensor->SetBuffer(padModeValue, sizeof(int8_t));
+    m_allTensors.emplace_back(tensor);
+}
+
+void Conv2DTransposePadmodeBuilderTest::SetOutPaddings(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    int32_t outPaddingsNum = 2;
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int64_t* outPaddingsValue = new (std::nothrow) int64_t[2]{0, 0};
+    EXPECT_NE(nullptr, outPaddingsValue);
+    tensor->SetBuffer(outPaddingsValue, outPaddingsNum * sizeof(int64_t));
+    m_allTensors.emplace_back(tensor);
+}
+
+void Conv2DTransposePadmodeBuilderTest::SetParam()
+{
+    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES);
+    SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION);
+    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE);
+    SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS);
+    SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP);
+    SetActivation(OH_NN_INT8, m_param_dim, nullptr,
OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); +} + +void Conv2DTransposePadmodeBuilderTest::SetConv2dTransposeInput() +{ + int32_t weightNum = 4; + std::vector m_input_dim{1, 4, 4, 1}; + std::vector weightDim = {1, 2, 2, 1}; + std::vector biasDim = {1}; + std::shared_ptr inTensor; + inTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inTensor); + inTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR); + float* weightValue = new (std::nothrow) float[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, weightValue); + inTensor->SetBuffer(weightValue, weightNum * sizeof(weightValue)); + m_allTensors.emplace_back(inTensor); + inTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* biasValue = new (std::nothrow) float[1]{0}; + EXPECT_NE(nullptr, biasValue); + inTensor->SetBuffer(biasValue, sizeof(float)); + m_allTensors.emplace_back(inTensor); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2, 3, 4, 5, 6, 7}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_004, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {}; + m_params = {3, 4, 5, 6, 7, 8}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_005, TestSize.Level1) +{ + m_inputs = {0, 1, 10}; + 
m_outputs = {3}; + m_params = {4, 5, 6, 7, 8, 9}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_006, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {10}; + m_params = {4, 5, 6, 7, 8, 9}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_007 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_007, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_STRIDES); + int32_t* strideValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_008 + * @tc.desc: Verify the invalid dilation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_dilation_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_DILATION); + int32_t* dilationValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, dilationValue); + + tensor->SetBuffer(dilationValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_009 + * @tc.desc: Verify the invalid 
padmode of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + int32_t* padModeValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, padModeValue); + + tensor->SetBuffer(padModeValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_010 + * @tc.desc: Verify the invalid outpaddings of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + + std::shared_ptr outPadtensor = TransToNNTensor(OH_NN_INT32, outPaddingsDim, nullptr, + OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + int32_t* outPaddingsTypeInvalid = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, outPaddingsTypeInvalid); + + outPadtensor->SetBuffer(outPaddingsTypeInvalid, sizeof(int32_t)); + m_allTensors.emplace_back(outPadtensor); + + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_011 + * @tc.desc: Verify the invalid group of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_011, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_GROUP); + int32_t* groupValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, groupValue); + + tensor->SetBuffer(groupValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, 
OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_012 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_012, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + int32_t* activationTest = new (std::nothrow) int32_t(1); + EXPECT_NE(nullptr, activationTest); + + tensor->SetBuffer(activationTest, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_013 + * @tc.desc: Verify the group scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_013, TestSize.Level1) +{ + std::vector groupDim = {2}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, outPaddingsDim, nullptr, + OH_NN_CONV2D_TRANSPOSE_GROUP); + int64_t* groupTest = new (std::nothrow) int64_t[2]{0, 0}; + EXPECT_NE(nullptr, groupTest); + + tensor->SetBuffer(groupTest, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_014 + * @tc.desc: Verify the activation scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_014, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + std::shared_ptr tensor = 
TransToNNTensor(OH_NN_INT8, outPaddingsDim, nullptr, + OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + int8_t* activationTest = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationTest); + + tensor->SetBuffer(activationTest, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_015 + * @tc.desc: Verify the invalid weight dims of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_015, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + int32_t weightNum = 3; + std::vector m_input_dim{1, 4, 4, 1}; + std::vector weightDim = {1, 3, 1}; + std::vector biasDim = {1}; + + std::shared_ptr inTensor; + inTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inTensor); + inTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR); + float* weightValue = new (std::nothrow) float[3]{1, 1, 1}; + EXPECT_NE(nullptr, weightValue); + + inTensor->SetBuffer(weightValue, weightNum * sizeof(weightValue)); + m_allTensors.emplace_back(inTensor); + inTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* biasValue = new (std::nothrow) float[1]{0}; + EXPECT_NE(nullptr, biasValue); + + inTensor->SetBuffer(biasValue, sizeof(float)); + m_allTensors.emplace_back(inTensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_016 + * @tc.desc: Verify the invalid param to conv2d transpose length of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_016, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_CONV2D_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_017 + * @tc.desc: Verify the activation value of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_017, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + 
SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t(10); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_018 + * @tc.desc: Verify the padmode value of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_018, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + int8_t* padModeValue = new (std::nothrow) int8_t(10); + EXPECT_NE(nullptr, padModeValue); + + tensor->SetBuffer(padModeValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_019 + * @tc.desc: Verify the pad dim invalid of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_019, TestSize.Level1) +{ + std::vector m_pad_dim = {3}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + int64_t* padValue = new (std::nothrow) int64_t[3]{1, 1, 1}; + EXPECT_NE(nullptr, padValue); + + tensor->SetBuffer(padValue, 3 * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_020 + * @tc.desc: Verify the conv2dtranspose without set stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_020, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = 
TransToNNTensor(OH_NN_INT64, m_stride_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_STRIDES); + m_allTensors.emplace_back(tensor); + + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_021 + * @tc.desc: Verify the conv2dtranspose without set dilation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_021, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_dilation_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_DILATION); + m_allTensors.emplace_back(tensor); + + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_022 + * @tc.desc: Verify the conv2dtranspose without set pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_022, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + m_allTensors.emplace_back(tensor); + + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_023 + * @tc.desc: Verify the conv2dtranspose without set outpaddings of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_023, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, 
OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, outPaddingsDim, nullptr, + OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + m_allTensors.emplace_back(tensor); + + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_024 + * @tc.desc: Verify the conv2dtranspose without set group of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_024, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_GROUP); + m_allTensors.emplace_back(tensor); + + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_025 + * @tc.desc: Verify the conv2dtranspose without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_025, TestSize.Level1) +{ + m_paramsIndex = m_params; + SetConv2dTransposeInput(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + m_allTensors.emplace_back(tensor); + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_getprimitive_padmode_001 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_getprimitive_padmode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + std::vector returnStrides = mindspore::lite::MindIR_Conv2dTransposeFusion_GetStride(primitive.get()); + std::vector 
strideValueTest{1, 1};
+    std::vector<int64_t> expectDilation = mindspore::lite::MindIR_Conv2dTransposeFusion_GetDilation(primitive.get());
+    std::vector<int64_t> dilationValueTest{1, 1};
+    int expectpadMode = mindspore::lite::MindIR_Conv2dTransposeFusion_GetPadMode(primitive.get());
+    EXPECT_EQ(1, expectpadMode);
+
+    int expectGroup = mindspore::lite::MindIR_Conv2dTransposeFusion_GetGroup(primitive.get());
+    EXPECT_EQ(0, expectGroup);
+
+    std::vector<int64_t> expectoutPadding =
+        mindspore::lite::MindIR_Conv2dTransposeFusion_GetOutputPaddings(primitive.get());
+    std::vector<int64_t> outPaddingTest{0, 0};
+    EXPECT_EQ(outPaddingTest, expectoutPadding);
+
+    int expectActivation = mindspore::lite::MindIR_Conv2dTransposeFusion_GetActivationType(primitive.get());
+    EXPECT_EQ(0, expectActivation);
+}
+
+/**
+ * @tc.name: conv2dtranpose_getprimitive_padmode_002
+ * @tc.desc: Verify the behavior of the GetPrimitive function when Build has not been called
+ * @tc.type: FUNC
+ */
+HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_getprimitive_padmode_002, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+
+    SetConv2dTransposeInput();
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    SetParam();
+
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive);
+    EXPECT_EQ(expectPrimitive, primitive);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/test/unittest/ops/conv2d_transpose_pad_test.cpp b/test/unittest/ops/conv2d_transpose_pad_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a0a817921a9b28d427312cec8c12d6bd48c2be00
--- /dev/null
+++ b/test/unittest/ops/conv2d_transpose_pad_test.cpp
@@ -0,0 +1,488 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "frameworks/native/ops/conv2d_transpose_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class Conv2DTransposeBuilderTest : public OpsTest {
+public:
+    void SetUp() override;
+    void TearDown() override;
+
+    void SetConv2dTransposeInput();
+    void SetPad(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+    void SetOutPaddings(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+    void SetPadParam();
+
+public:
+    Conv2DTransposeBuilder m_builder;
+    std::vector<uint32_t> m_inputs{0, 1, 2};
+    std::vector<uint32_t> m_outputs{3};
+    std::vector<uint32_t> m_params{4, 5, 6, 7, 8, 9};
+    std::vector<int32_t> m_output_dim{1, 3, 3, 1};
+    std::vector<int32_t> m_stride_dim{2};
+    std::vector<int32_t> m_dilation_dim{2};
+    std::vector<int32_t> m_outpaddings_dim{2};
+    std::vector<int32_t> m_pad_dim{4};
+    std::vector<int32_t> m_param_dim{};
+};
+
+void Conv2DTransposeBuilderTest::SetUp() {}
+
+void Conv2DTransposeBuilderTest::TearDown() {}
+
+void Conv2DTransposeBuilderTest::SetPad(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    int32_t padNum = 4;
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int64_t* padValue = new (std::nothrow) int64_t[4]{1, 1, 1, 1};
+    EXPECT_NE(nullptr, padValue);
+
+    tensor->SetBuffer(padValue, padNum * sizeof(int64_t));
+    m_allTensors.emplace_back(tensor);
+}
+
+void Conv2DTransposeBuilderTest::SetOutPaddings(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    int32_t outPaddingsNum = 2;
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int64_t* outPaddingsValue = new (std::nothrow) int64_t[2]{0, 0};
+    EXPECT_NE(nullptr, outPaddingsValue);
+
+    tensor->SetBuffer(outPaddingsValue, outPaddingsNum * sizeof(int64_t));
+    m_allTensors.emplace_back(tensor);
+}
+
+void Conv2DTransposeBuilderTest::SetPadParam()
+{
+    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES);
+    SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION);
+    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD);
+    SetOutPaddings(OH_NN_INT64, m_outpaddings_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS);
+    SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP);
+    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE);
+}
+
+void Conv2DTransposeBuilderTest::SetConv2dTransposeInput()
+{
+    int32_t weightNum = 4;
+    std::vector<int32_t> m_input_dim{1, 4, 4, 1};
+    std::vector<int32_t> weightDim = {1, 2, 2, 1};
+    std::vector<int32_t> biasDim = {1};
+    std::shared_ptr<NNTensor> tensor;
+    tensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR);
+    m_allTensors.emplace_back(tensor);
+
+    tensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR);
+    float* weightValue = new (std::nothrow) float[4]{1, 1, 1, 1};
+    EXPECT_NE(nullptr, weightValue);
+
+    tensor->SetBuffer(weightValue, weightNum * sizeof(float));
+    m_allTensors.emplace_back(tensor);
+    tensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR);
+    float* biasValue = new (std::nothrow) float[1]{0};
+    EXPECT_NE(nullptr, biasValue);
+
+    tensor->SetBuffer(biasValue, sizeof(float));
+    m_allTensors.emplace_back(tensor);
+}
+
+/**
+ *
@tc.name: conv2dtranpose_build_pad_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_001, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_002, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2, 3, 4, 5, 6, 7}; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_004, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {}; + m_params = {3, 4, 5, 6, 7, 8}; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_005, TestSize.Level1) +{ + m_inputs = {0, 1, 10}; + m_outputs = {3}; + m_params = {4, 5, 6, 7, 8, 9}; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_006, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {10}; + m_params = {4, 5, 6, 7, 8, 9};; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_007 + * 
@tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_007, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_STRIDES); + int32_t* strideValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + SetOutPaddings(OH_NN_INT64, m_outpaddings_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_008 + * @tc.desc: Verify the invalid dilation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_008, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_dilation_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_DILATION); + int32_t* dilationValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, dilationValue); + + tensor->SetBuffer(dilationValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + SetOutPaddings(OH_NN_INT64, m_outpaddings_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_009 + * @tc.desc: Verify the invalid pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_009, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + int32_t* padValue = new (std::nothrow) int32_t[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, padValue); + + tensor->SetBuffer(padValue, 4 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetOutPaddings(OH_NN_INT64, m_outpaddings_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_010 + * @tc.desc: Verify the invalid 
outpaddings of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_010, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_outpaddings_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + + int32_t* outPaddingsTypeInvalid = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, outPaddingsTypeInvalid); + tensor->SetBuffer(outPaddingsTypeInvalid, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_011 + * @tc.desc: Verify the invalid group of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_011, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + SetOutPaddings(OH_NN_INT64, m_outpaddings_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + int32_t* groupValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, groupValue); + + tensor->SetBuffer(groupValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_012 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_012, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + SetOutPaddings(OH_NN_INT64, m_outpaddings_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_013 + * @tc.desc: Verify the group scalar length of the build 
function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_013, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + SetOutPaddings(OH_NN_INT64, m_outpaddings_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_outpaddings_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_GROUP); + int64_t* groupValue = new (std::nothrow) int64_t[2]{0, 0}; + EXPECT_NE(nullptr, groupValue); + + tensor->SetBuffer(groupValue, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_014 + * @tc.desc: Verify the activation scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_014, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + SetOutPaddings(OH_NN_INT64, m_outpaddings_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_outpaddings_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationValue); + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_getprimitive_padmode_001 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_getprimitive_padmode_001, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + std::vector returnStrides = mindspore::lite::MindIR_Conv2dTransposeFusion_GetStride(primitive.get()); + std::vector strideValueTest{1, 1}; + std::vector returnDliation = mindspore::lite::MindIR_Conv2dTransposeFusion_GetDilation(primitive.get()); + std::vector dilationValueTest{1, 1}; + std::vector returnPad = mindspore::lite::MindIR_Conv2dTransposeFusion_GetPadList(primitive.get()); + std::vector padValueTest{1, 1, 1, 1}; + int returnGroup = mindspore::lite::MindIR_Conv2dTransposeFusion_GetGroup(primitive.get()); + EXPECT_EQ(0, returnGroup); + + std::vector outPaddingReturn = 
+ mindspore::lite::MindIR_Conv2dTransposeFusion_GetOutputPaddings(primitive.get()); + std::vector outPaddingTest{0, 0}; + EXPECT_EQ(outPaddingTest, outPaddingReturn); + + int returnActivation = mindspore::lite::MindIR_Conv2dTransposeFusion_GetActivationType(primitive.get()); + EXPECT_EQ(0, returnActivation); +} + +/** + * @tc.name: conv2dtranpose_getprimitive_padmode_002 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_getprimitive_padmode_002, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/depthwise_conv2d_native_pad_test.cpp b/test/unittest/ops/depthwise_conv2d_native_pad_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0e4a6ae821243eff0b74ad0fd820e96194cb1178 --- /dev/null +++ b/test/unittest/ops/depthwise_conv2d_native_pad_test.cpp @@ -0,0 +1,629 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/depthwise_conv2d_native_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class DepthwiseConv2DNativeBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetDepthwiseConv2dInput(); + void SetPad(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetPadParam(); + +public: + DepthwiseConv2DNativeBuilder m_builder; + std::vector m_inputs{0, 1, 2}; + std::vector m_outputs{3}; + std::vector m_params{4, 5, 6, 7}; + std::vector m_output_dim{1, 4, 4, 2}; + std::vector m_stride_dim{2}; + std::vector m_dilation_dim{2}; + std::vector m_pad_dim{4}; + std::vector m_param_dim{}; +}; + +void DepthwiseConv2DNativeBuilderTest::SetUp() {} + +void DepthwiseConv2DNativeBuilderTest::TearDown() {} + +void DepthwiseConv2DNativeBuilderTest::SetPad(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + int32_t padNum = 4; + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* padValue = new (std::nothrow) int64_t[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, padValue); + + tensor->SetBuffer(padValue, padNum * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); +} + +void DepthwiseConv2DNativeBuilderTest::SetPadParam() +{ + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); +} + +void DepthwiseConv2DNativeBuilderTest::SetDepthwiseConv2dInput() +{ + int32_t weightNum = 8; + int32_t biasNum = 2; + std::vector m_input_dim{1, 3, 3, 2}; + std::vector weightDim = {2, 2, 2, 1}; + std::vector biasDim = {2}; + + std::shared_ptr inputsTensor; + inputsTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputsTensor); + inputsTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR); + float* weightValue = new (std::nothrow) float[8]{1, 0, 0, 1, 0, 1, 1, 0}; + EXPECT_NE(nullptr, weightValue); + + inputsTensor->SetBuffer(weightValue, weightNum * sizeof(weightValue)); + m_allTensors.emplace_back(inputsTensor); + inputsTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* biasValue = new (std::nothrow) float[2]{0, 0}; + EXPECT_NE(nullptr, biasValue); + inputsTensor->SetBuffer(biasValue, biasNum * sizeof(float)); + m_allTensors.emplace_back(inputsTensor); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ 
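+// The builder below is built successfully once; the second Build call on the same
+// builder instance is expected to be rejected with OH_NN_OPERATION_FORBIDDEN.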
+HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_004, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {}; + m_params = {3, 4, 5, 6}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_005, TestSize.Level1) +{ + m_inputs = {0, 1, 9}; + m_outputs = {3}; + m_params = {4, 5, 6, 7}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_006, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {9}; + m_params = {4, 5, 6, 7}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_007 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_007, TestSize.Level1) +{ + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + int32_t* strideValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, 2 * sizeof(int32_t)); + 
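+    // strideValue above was created with OH_NN_INT32 data rather than the OH_NN_INT64 used by the
+    // valid SetStride helper, so the Build call below should return OH_NN_INVALID_PARAMETER.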
m_allTensors.emplace_back(tensor); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_008 + * @tc.desc: Verify the invalid dilation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_008, TestSize.Level1) +{ + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_dilation_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + int32_t* dilationValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, dilationValue); + + tensor->SetBuffer(dilationValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_009 + * @tc.desc: Verify the invalid pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_pad_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + int32_t* padValue = new (std::nothrow) int32_t[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, padValue); + + tensor->SetBuffer(padValue, 4 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} +/** + * @tc.name: depthwiseconv2d_build_padmode_010 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, 
activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_011 + * @tc.desc: Verify the scalar activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_011, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + + std::vector activationDim = {2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, activationDim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_012 + * @tc.desc: Verify the invalid param to depthwiseconv2d of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_012, TestSize.Level1) +{ + std::vector activationDim = {2}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, activationDim, nullptr, + OH_NN_DIV_ACTIVATIONTYPE); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_013 + * @tc.desc: Verify the invalid activation value of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_013, TestSize.Level1) +{ + std::vector activationDim = {}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, activationDim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t(10); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_014 + * @tc.desc: Verify the invalid pad dim value of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_014, TestSize.Level1) +{ + std::vector m_pad_dim = {3}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + + int32_t padNum = 3; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + int64_t* padValue = new (std::nothrow) int64_t[3]{1, 1, 1}; + EXPECT_NE(nullptr, padValue); + + tensor->SetBuffer(padValue, padNum * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_015 + * @tc.desc: Verify the invalid weigth size of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_015, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + int32_t weightNum = 3; + int32_t biasNum = 2; + std::vector m_input_dim{1, 3, 3, 2}; + std::vector weightDim = {1, 3, 3}; + std::vector biasDim = {2}; + + std::shared_ptr inputsTensor; + inputsTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputsTensor); + + inputsTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR); + float* weightValue = new (std::nothrow) float[3]{1, 0, 0}; + EXPECT_NE(nullptr, weightValue); + + inputsTensor->SetBuffer(weightValue, weightNum * sizeof(weightValue)); + m_allTensors.emplace_back(inputsTensor); + + inputsTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* biasValue = new (std::nothrow) float[2]{0, 0}; + EXPECT_NE(nullptr, biasValue); + + inputsTensor->SetBuffer(biasValue, biasNum * sizeof(float)); + m_allTensors.emplace_back(inputsTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_016 + * @tc.desc: Verify the invalid inputdim of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_016, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + int32_t weightNum = 3; + int32_t biasNum = 2; + std::vector m_input_dim{1, 3, 3}; + std::vector weightDim = {2, 2, 2, 1}; + std::vector biasDim = {2}; + + std::shared_ptr inTensor; + inTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inTensor); + + inTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR); + float* weightValue = new (std::nothrow) float[8]{1, 0, 0, 1, 0, 1, 1, 0}; + EXPECT_NE(nullptr, weightValue); + + inTensor->SetBuffer(weightValue, weightNum * sizeof(weightValue)); + m_allTensors.emplace_back(inTensor); + + inTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* biasValue = new (std::nothrow) 
float[2]{0, 0}; + EXPECT_NE(nullptr, biasValue); + + inTensor->SetBuffer(biasValue, biasNum * sizeof(float)); + m_allTensors.emplace_back(inTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_017 + * @tc.desc: Verify the depthwiseconv2d without set stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_017, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_stride_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + m_allTensors.emplace_back(tensor); + + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_018 + * @tc.desc: Verify the depthwiseconv2d without set dilation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_018, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_dilation_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + m_allTensors.emplace_back(tensor); + + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_019 + * @tc.desc: Verify the depthwiseconv2d without set pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_019, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + m_allTensors.emplace_back(tensor); + + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_020 + * @tc.desc: Verify the depthwiseconv2d without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_020, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + 
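+    // Every parameter except activation is given a valid buffer; the activation tensor is
+    // appended without SetBuffer, so Build is expected to return OH_NN_INVALID_PARAMETER.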
SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_getprimitive_padmode_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_getprimitive_padmode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + std::vector padValueTest{1, 1, 1, 1}; + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + std::vector expectStrides = mindspore::lite::MindIR_Conv2DFusion_GetStride(primitive.get()); + std::vector strideValueTest{1, 1}; + std::vector expectDliation = mindspore::lite::MindIR_Conv2DFusion_GetDilation(primitive.get()); + std::vector dilationValueTest{1, 1}; + std::vector expectPad = mindspore::lite::MindIR_Conv2DFusion_GetPadList(primitive.get()); + EXPECT_EQ(padValueTest, expectPad); + + int returnActivation = mindspore::lite::MindIR_Conv2DFusion_GetActivationType(primitive.get()); + EXPECT_EQ(0, returnActivation); +} + +/** + * @tc.name: depthwiseconv2d_getprimitive_padmode_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_getprimitive_padmode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/depthwise_conv2d_native_padmode_test.cpp b/test/unittest/ops/depthwise_conv2d_native_padmode_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8181be2a94fa7bf778df7a3a1c8baac0dee90d49 --- /dev/null +++ b/test/unittest/ops/depthwise_conv2d_native_padmode_test.cpp @@ -0,0 +1,409 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/depthwise_conv2d_native_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class DepthwiseConv2DNativePadModeBuilderTest : public OpsTest { +public: + void SetUp(); + void TearDown(); + + void SetDepthwiseConv2dInput(); + void SetPadMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetParam(); + +public: + DepthwiseConv2DNativeBuilder m_builder; + std::vector m_inputs{0, 1, 2}; + std::vector m_outputs{3}; + std::vector m_params{4, 5, 6, 7}; + std::vector m_output_dim{1, 4, 4, 2}; + std::vector m_stride_dim{2}; + std::vector m_dilation_dim{2}; + std::vector m_param_dim{}; +}; + +void DepthwiseConv2DNativePadModeBuilderTest::SetUp() {} + +void DepthwiseConv2DNativePadModeBuilderTest::TearDown() {} + +void DepthwiseConv2DNativePadModeBuilderTest::SetPadMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* padModeValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, padModeValue); + tensor->SetBuffer(padModeValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); +} + +void DepthwiseConv2DNativePadModeBuilderTest::SetParam() +{ + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); +} + +void DepthwiseConv2DNativePadModeBuilderTest::SetDepthwiseConv2dInput() +{ + int32_t weightNum = 8; + int32_t biasNum = 2; + std::vector m_input_dim{1, 3, 3, 2}; + std::vector weightDim = {2, 2, 2, 1}; + std::vector biasDim = {2}; + std::shared_ptr inputTensor; + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + + inputTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR); + float* weightValue = new (std::nothrow) float[8]{1, 0, 0, 1, 0, 1, 1, 0}; + EXPECT_NE(nullptr, weightValue); + + inputTensor->SetBuffer(weightValue, weightNum * sizeof(weightValue)); + m_allTensors.emplace_back(inputTensor); + + inputTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* biasValue = new (std::nothrow) float[2]{0, 0}; + EXPECT_NE(nullptr, biasValue); + + inputTensor->SetBuffer(biasValue, biasNum * sizeof(float)); + m_allTensors.emplace_back(inputTensor); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_002 + * @tc.desc: Verify the forbidden of 
the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_004, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {}; + m_params = {3, 4, 5, 6}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_005, TestSize.Level1) +{ + m_inputs = {0, 1, 9}; + m_outputs = {3}; + m_params = {4, 5, 6, 7}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_006, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {9}; + m_params = {4, 5, 6, 7}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_007 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_007, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + int32_t* strideValue = new (std::nothrow) 
int32_t[2]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_008 + * @tc.desc: Verify the invalid dilation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_dilation_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + int32_t* dilationValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, dilationValue); + + tensor->SetBuffer(dilationValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_009 + * @tc.desc: Verify the invalid pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE); + int32_t* padModeValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, padModeValue); + tensor->SetBuffer(padModeValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_010 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + 
OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_011 + * @tc.desc: Verify the scalar activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_011, TestSize.Level1) +{ + std::vector activationDim = {2}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, activationDim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_012 + * @tc.desc: Verify the invalid pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_012, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE); + int8_t* padModeValue = new (std::nothrow) int8_t(10); + EXPECT_NE(nullptr, padModeValue); + + tensor->SetBuffer(padModeValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_getprimitive_padmode_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_getprimitive_padmode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + std::vector returnStrides = mindspore::lite::MindIR_Conv2DFusion_GetStride(primitive.get()); + std::vector strideValueTest{1, 1}; + std::vector returnDliation = 
mindspore::lite::MindIR_Conv2DFusion_GetDilation(primitive.get()); + std::vector dilationValueTest{1, 1}; + EXPECT_EQ(dilationValueTest, returnDliation); + + int returnpadMode = mindspore::lite::MindIR_Conv2DFusion_GetPadMode(primitive.get()); + EXPECT_EQ(1, returnpadMode); + int returnActivation = mindspore::lite::MindIR_Conv2DFusion_GetActivationType(primitive.get()); + EXPECT_EQ(0, returnActivation); +} + +/** + * @tc.name: depthwiseconv2d_getprimitive_padmode_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_getprimitive_padmode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/div_test.cpp b/test/unittest/ops/div_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..31668cbb619203a3a85d46fb2a5495043e9e4f35 --- /dev/null +++ b/test/unittest/ops/div_test.cpp @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/div_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class DivFusionTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SaveParamsTensor(const std::vector& m_params, OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +public: + DivBuilder m_builder; + std::vector m_inputs{0, 1}; + std::vector m_outputs{2}; + std::vector m_params{3}; + std::vector m_input_dim{3, 3}; + std::vector m_output_dim{3, 3}; + std::vector m_param_dim{}; +}; + +void DivFusionTest::SetUp() {} + +void DivFusionTest::TearDown() {} + +void DivFusionTest::SaveParamsTensor(const std::vector& m_params, OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + m_paramsIndex = m_params; + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); +} + +/** + * @tc.name: div_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_004, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {}; + m_params = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, 
m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_005, TestSize.Level1) +{ + m_inputs = {0, 6}; + m_outputs = {2}; + m_params = {3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_006, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {6}; + m_params = {3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_007 + * @tc.desc: Verify the param invalid of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_007, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + m_paramsIndex = m_params; + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + int32_t* activationValueTest = new (std::nothrow) int32_t[0]; + EXPECT_NE(nullptr, activationValueTest); + + tensor->SetBuffer(activationValueTest, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_008 + * @tc.desc: Verify the scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_008, TestSize.Level1) +{ + m_param_dim = {2}; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + m_paramsIndex = m_params; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + int8_t* activationValueTest = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationValueTest); + + tensor->SetBuffer(activationValueTest, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_009 + * @tc.desc: Verify the invalid activation value of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_009, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + m_paramsIndex = m_params; + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + int8_t* activationValueTest = new (std::nothrow) int8_t(10); + EXPECT_NE(nullptr, activationValueTest); + + tensor->SetBuffer(activationValueTest, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, 
m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_010 + * @tc.desc: Verify the invalid param to div of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_010, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + m_paramsIndex = m_params; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + int8_t* activationValueTest = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValueTest); + + tensor->SetBuffer(activationValueTest, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_011 + * @tc.desc: Verify the div without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_011, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_DIV_ACTIVATIONTYPE); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_getprimitive_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_getprimitive_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + int8_t activationValueTest = 0; + int8_t returnValue = mindspore::lite::MindIR_DivFusion_GetActivationType(primitive.get()); + EXPECT_EQ(returnValue, activationValueTest); +} + +/** + * @tc.name: div_getprimitive_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_getprimitive_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + + LiteGraphTensorPtr primitive = {nullptr, DestroyLiteGraphPrimitive}; + LiteGraphTensorPtr expectPrimitive = m_builder.GetPrimitive(); + EXPECT_EQ(primitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/eltwise_test.cpp b/test/unittest/ops/eltwise_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3a44e47f5c823b31e6f7a4006e9eb6dee29e9ef1 --- /dev/null +++ b/test/unittest/ops/eltwise_test.cpp @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/eltwise_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class EltwiseBuilderTest : public OpsTest { +public: + void SetUp(); + void TearDown(); + + void SetEltwiseMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); +public: + EltwiseBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3}; + std::vector m_input_dim {3, 3}; + std::vector m_output_dim {3, 3}; + std::vector m_param_dim {}; +}; + +void EltwiseBuilderTest::SetUp() {} + +void EltwiseBuilderTest::TearDown() {} + +void EltwiseBuilderTest::SetEltwiseMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* modeValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, modeValue); + tensor->SetBuffer(modeValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); +} + +/** + * @tc.name: eltwise_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(EltwiseBuilderTest, eltwise_build_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetEltwiseMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(EltwiseBuilderTest, eltwise_build_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetEltwiseMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(EltwiseBuilderTest, eltwise_build_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetEltwiseMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ 
+HWTEST_F(EltwiseBuilderTest, eltwise_build_004, TestSize.Level1) +{ + m_outputs = {}; + m_params = {2}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetEltwiseMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(EltwiseBuilderTest, eltwise_build_005, TestSize.Level1) +{ + m_inputs = {0, 6}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetEltwiseMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(EltwiseBuilderTest, eltwise_build_006, TestSize.Level1) +{ + m_outputs = {6}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetEltwiseMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_007 + * @tc.desc: Verify the invalid eltwiseMode of the build function + * @tc.type: FUNC + */ + +HWTEST_F(EltwiseBuilderTest, eltwise_build_007, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + int32_t* modeValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, modeValue); + + tensor->SetBuffer(modeValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_008 + * @tc.desc: Verify the scalar length of the build function + * @tc.type: FUNC + */ + +HWTEST_F(EltwiseBuilderTest, eltwise_build_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_param_dim = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + int8_t* modeValue = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, modeValue); + + tensor->SetBuffer(modeValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_008 + * @tc.desc: Verify the invalid mode value of the build function + * @tc.type: FUNC + */ + +HWTEST_F(EltwiseBuilderTest, eltwise_build_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = 
TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + int8_t* modeValue = new (std::nothrow) int8_t(10); + EXPECT_NE(nullptr, modeValue); + + tensor->SetBuffer(modeValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_010 + * @tc.desc: Verify the invalid param to eltwise of the build function + * @tc.type: FUNC + */ + +HWTEST_F(EltwiseBuilderTest, eltwise_build_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + int8_t* modeValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, modeValue); + + tensor->SetBuffer(modeValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_011 + * @tc.desc: Verify the eltwise without set mode of the build function + * @tc.type: FUNC + */ +HWTEST_F(EltwiseBuilderTest, eltwise_build_011, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_getprimitive_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(EltwiseBuilderTest, eltwise_getprimitive_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetEltwiseMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + bool eltwiseModeReturn = mindspore::lite::MindIR_Eltwise_GetMode(primitive.get()); + EXPECT_EQ(eltwiseModeReturn, eltwiseModeReturn); +} + +/** + * @tc.name: eltwise_getprimitive_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(EltwiseBuilderTest, eltwise_getprimitive_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetEltwiseMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/expandims_test.cpp b/test/unittest/ops/expandims_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ad3c040b8febbbb6ac28f1931e85cbad2a378ed8 --- /dev/null +++ 
b/test/unittest/ops/expandims_test.cpp
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "frameworks/native/ops/expandims_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class ExpandDimsBuilderTest : public OpsTest {
+public:
+    void SetUp() override;
+    void TearDown() override;
+
+public:
+    ExpandDimsBuilder m_builder;
+    std::vector<uint32_t> m_inputs {0, 1};
+    std::vector<uint32_t> m_outputs {2};
+    std::vector<uint32_t> m_params {};
+    std::vector<int32_t> m_input_dim {3, 3};
+    std::vector<int32_t> m_output_dim {3, 3};
+    std::vector<int32_t> m_param_dim {};
+};
+
+void ExpandDimsBuilderTest::SetUp() {}
+
+void ExpandDimsBuilderTest::TearDown() {}
+
+/**
+ * @tc.name: expandims_build_001
+ * @tc.desc: Verify the success of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExpandDimsBuilderTest, expandims_build_001, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: expandims_build_002
+ * @tc.desc: Verify the forbidden of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExpandDimsBuilderTest, expandims_build_002, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: expandims_build_003
+ * @tc.desc: Verify the missing input of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExpandDimsBuilderTest, expandims_build_003, TestSize.Level1)
+{
+    m_inputs = {0};
+    m_outputs = {1};
+    m_paramsIndex = m_params;
+
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: expandims_build_004
+ * @tc.desc: Verify the missing output of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExpandDimsBuilderTest, expandims_build_004, TestSize.Level1)
+{
+    m_outputs = {};
+    m_paramsIndex = m_params;
+
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: expandims_build_005
+ * @tc.desc: Verify the inputIndex out of bounds of the build
function + * @tc.type: FUNC + */ +HWTEST_F(ExpandDimsBuilderTest, expandims_build_005, TestSize.Level1) +{ + m_inputs = {0, 6}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: expandims_build_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(ExpandDimsBuilderTest, expandims_build_006, TestSize.Level1) +{ + m_outputs = {6}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: expandims_build_007 + * @tc.desc: Verify the paramIndex not empth of the build function + * @tc.type: FUNC + */ + +HWTEST_F(ExpandDimsBuilderTest, expandims_build_007, TestSize.Level1) +{ + m_params = {1}; + m_param_dim = {1}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: expandims_getprimitive_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(ExpandDimsBuilderTest, expandims_getprimitive_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: expandims_getprimitive_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(ExpandDimsBuilderTest, expandims_getprimitive_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/fill_builder_test.cpp b/test/unittest/ops/fill_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..59bde4583343c742d9d27b8edced195aa53e0fcd --- /dev/null +++ b/test/unittest/ops/fill_builder_test.cpp @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/fill_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class FillBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + FillBuilder m_fill; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {}; + std::vector m_inputDim {}; + std::vector m_outputDim {2, 3}; +}; + +void FillBuilderTest::SetUp() {} + +void FillBuilderTest::TearDown() {} + +/** + * @tc.name: fill_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_fill.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: fill_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_fill.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_fill.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: fill_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_fill.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: fill_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_build_004, TestSize.Level0) +{ + m_outputs = {2, 3, 4}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_fill.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: fill_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_fill.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: fill_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_fill.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: fill_build_007 + * @tc.desc: Verify that the build function returns a failed message with a virtual parameter. + * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_build_007, TestSize.Level0) +{ + m_params = {3}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr paramTensor; + paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_fill.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: fill_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_fill.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_fill.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: fill_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_getprimitive_002, TestSize.Level0) +{ + LiteGraphPrimitvePtr primitive = m_fill.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/fullconnection_test.cpp b/test/unittest/ops/fullconnection_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0754a713dcafc8698b8d659d1af73fda402a0ff5 --- /dev/null +++ b/test/unittest/ops/fullconnection_test.cpp @@ -0,0 +1,289 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/fullconnection_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class FullConnectionBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetInputToAlltensor(); + void SetActivation(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +public: + FullConnectionBuilder m_builder; + std::vector m_inputs {0, 1, 2}; + std::vector m_outputs {3}; + std::vector m_params {4}; + std::vector m_output_dim {2, 2}; + std::vector m_param_dim {}; +}; + +void FullConnectionBuilderTest::SetUp() {} + +void FullConnectionBuilderTest::TearDown() {} + +void FullConnectionBuilderTest::SetInputToAlltensor() +{ + std::vector m_input_dim{2, 2}; + std::vector biasDim = {2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(tensor); + + int32_t numWeight = 4; + int32_t numBias = 2; + tensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + float* valueWeight = new (std::nothrow) float[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, valueWeight); + + tensor->SetBuffer(valueWeight, numWeight * sizeof(float)); + m_allTensors.emplace_back(tensor); + + tensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* valueBias = new (std::nothrow) float[2]{0, 0}; + EXPECT_NE(nullptr, valueBias); + tensor->SetBuffer(valueBias, numBias * sizeof(float)); + m_allTensors.emplace_back(tensor); +} + +void FullConnectionBuilderTest::SetActivation(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); +} + +/** + * @tc.name: fullconnection_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_001, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_002, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_003 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionBuilderTest, 
fullconnection_build_003, TestSize.Level1) +{ + m_outputs = {}; + m_params = {3}; + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_004 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_004, TestSize.Level1) +{ + m_inputs = {0, 1, 6}; + m_outputs = {3}; + m_params = {4}; + + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_005 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ + +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_005, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + int32_t *activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_006 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ + +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_006, TestSize.Level1) +{ + m_param_dim = {2}; + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + int8_t *activationValue = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_007 + * @tc.desc: Verify the invalid avtivation value of the build function + * @tc.type: FUNC + */ + +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_007, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + int8_t *activationValue = new (std::nothrow) int8_t(10); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_008 + 
* @tc.desc: Verify the invalid param to fullconnection of the build function + * @tc.type: FUNC + */ + +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_008, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_DIV_ACTIVATIONTYPE); + int8_t *activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + tensor->SetBuffer(activationValue, sizeof(int8_t)); + + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_getprimitive_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionBuilderTest, fullconnection_getprimitive_001, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + int8_t activationReturn = mindspore::lite::MindIR_FullConnection_GetActivationType(primitive.get()); + EXPECT_EQ(activationReturn, 0); +} + +/** + * @tc.name: fullconnection_getprimitive_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionBuilderTest, fullconnection_getprimitive_002, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/fullconnection_with_axis_test.cpp b/test/unittest/ops/fullconnection_with_axis_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6f379e20d816291a37faa29632676ee96822010c --- /dev/null +++ b/test/unittest/ops/fullconnection_with_axis_test.cpp @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/fullconnection_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class FullConnectionAxisBuilderTest : public OpsTest { +public: + void SetUp(); + void TearDown(); + + void SetInputToAlltensor(); + void SetActivation(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SeAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +public: + FullConnectionBuilder m_builder; + std::vector m_inputs {0, 1, 2}; + std::vector m_outputs {3}; + std::vector m_params {4, 5}; + std::vector m_output_dim {2, 2}; + std::vector m_param_dim {}; +}; + +void FullConnectionAxisBuilderTest::SetUp() {} + +void FullConnectionAxisBuilderTest::TearDown() {} + +void FullConnectionAxisBuilderTest::SetInputToAlltensor() +{ + std::vector m_input_dim{2, 2}; + std::vector biasDim{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(tensor); + int32_t weightNum = 4; + int32_t biasNum = 2; + tensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + float* valueWeight = new (std::nothrow) float[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, valueWeight); + tensor->SetBuffer(valueWeight, weightNum * sizeof(float)); + m_allTensors.emplace_back(tensor); + + tensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* valueBias = new (std::nothrow) float[2]{0, 0}; + EXPECT_NE(nullptr, valueBias); + tensor->SetBuffer(valueBias, biasNum * sizeof(float)); + m_allTensors.emplace_back(tensor); +} + +void FullConnectionAxisBuilderTest::SetActivation(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); +} + +void FullConnectionAxisBuilderTest::SeAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t(0); + EXPECT_NE(nullptr, axisValue); + + tensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(tensor); +} + +/** + * @tc.name: fullconnection_build_axis_001 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_001, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_002 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_002, TestSize.Level1) +{ + m_inputsIndex = 
m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_003 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_003, TestSize.Level1) +{ + m_outputs = {}; + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_004 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_004, TestSize.Level1) +{ + m_inputs = {0, 1, 6}; + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_005 + * @tc.desc: Verify the invalid axis of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_005, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + int32_t *axisValueTest = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, axisValueTest); + + tensor->SetBuffer(axisValueTest, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_006 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_006, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + int32_t *activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + 
EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_007 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_007, TestSize.Level1) +{ + std::vector paramDimTest = {2}; + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + + SetInputToAlltensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, paramDimTest, nullptr, + OH_NN_FULL_CONNECTION_AXIS); + int64_t *axisValueTest = new (std::nothrow) int64_t[2]{0, 0}; + EXPECT_NE(nullptr, axisValueTest); + + tensor->SetBuffer(axisValueTest, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_008 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_008, TestSize.Level1) +{ + std::vector paramDimTest = {2}; + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, paramDimTest, nullptr, + OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + int8_t *activationValue = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_009 + * @tc.desc: Verify the fullconnection without set axis of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_009, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_010 + * @tc.desc: Verify the fullconnection without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_010, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_getprimitive_axis_001 + * @tc.desc: Verify the behavior of 
the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_getprimitive_axis_001, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + int returnValue = mindspore::lite::MindIR_FullConnection_GetAxis(primitive.get()); + EXPECT_EQ(returnValue, 0); + bool activationReturn = mindspore::lite::MindIR_FullConnection_GetActivationType(primitive.get()); + EXPECT_EQ(activationReturn, 0); +} + +/** + * @tc.name: fullconnection_getprimitive_axis_002 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_getprimitive_axis_002, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/gather_builder_test.cpp b/test/unittest/ops/gather_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..74a2a03cd9eba5c5279d739e0a13966d978a55e5 --- /dev/null +++ b/test/unittest/ops/gather_builder_test.cpp @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/gather_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class GatherBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + GatherBuilder m_gather; + std::vector m_inputs {0, 1, 2}; + std::vector m_outputs {3}; + std::vector m_params {}; + std::vector m_inputDim {4, 3}; + std::vector m_outputDim {4, 2}; +}; + +void GatherBuilderTest::SetUp() {} + +void GatherBuilderTest::TearDown() {} + +/** + * @tc.name: gather_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(GatherBuilderTest, gather_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_gather.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: gather_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(GatherBuilderTest, gather_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_gather.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_gather.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: gather_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(GatherBuilderTest, gather_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2, 3}; + m_outputs = {4}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_gather.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gather_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(GatherBuilderTest, gather_build_004, TestSize.Level0) +{ + std::vector m_outputs = {3, 4, 5}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_gather.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gather_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(GatherBuilderTest, gather_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_gather.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gather_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(GatherBuilderTest, gather_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_gather.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gather_build_007 + * @tc.desc: Verify that the build function returns a failed message with a virtual parameter. 
+ * @tc.type: FUNC + */ +HWTEST_F(GatherBuilderTest, gather_build_007, TestSize.Level0) +{ + m_params = {4}; + std::vector<int32_t> paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr<NNTensor> paramTensor; + paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_gather.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gather_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(GatherBuilderTest, gather_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_gather.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_gather.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: gather_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(GatherBuilderTest, gather_getprimitive_002, TestSize.Level0) +{ + GatherBuilder gather; + LiteGraphPrimitvePtr primitive = gather.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/gelu_builder_test.cpp b/test/unittest/ops/gelu_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e6b56b38a0faa6e926cb97e5b7abf01b6144cabd --- /dev/null +++ b/test/unittest/ops/gelu_builder_test.cpp @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/gelu_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class GeluBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + GeluBuilder m_gelu; + std::vector<uint32_t> m_inputs {0}; + std::vector<uint32_t> m_outputs {1}; + std::vector<uint32_t> m_params {}; + std::vector<int32_t> m_inputDim {1, 5, 1, 1}; + std::vector<int32_t> m_outputDim {1, 5, 1, 1}; +}; + +void GeluBuilderTest::SetUp() {} + +void GeluBuilderTest::TearDown() {} + +/** + * @tc.name: gelu_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: gelu_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: gelu_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gelu_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_build_004, TestSize.Level0) +{ + std::vector m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gelu_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gelu_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gelu_build_007 + * @tc.desc: Verify that the build function returns a failed message with a virtual parameter. 
+ * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_build_007, TestSize.Level0) +{ + m_params = {2}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr paramTensor; + paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gelu_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_gelu.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_GELU; + auto returnValue = mindspore::lite::MindIR_Activation_GetActivationType(primitive.get()); + EXPECT_EQ(returnValue, activationType); +} + +/** + * @tc.name: gelu_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_getprimitive_002, TestSize.Level0) +{ + GeluBuilder gelu; + LiteGraphPrimitvePtr primitive = m_gelu.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/hswish_builder_test.cpp b/test/unittest/ops/hswish_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a80a48b5b1272589da7f88ebf5049bacdaa9eee0 --- /dev/null +++ b/test/unittest/ops/hswish_builder_test.cpp @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/hswish_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class HswishBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + HswishBuilder m_hswish; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_params {}; + std::vector m_inputDim {1, 5, 1, 1}; + std::vector m_outputDim {1, 5, 1, 1}; +}; + +void HswishBuilderTest::SetUp() {} + +void HswishBuilderTest::TearDown() {} + +/** + * @tc.name: hswish_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_hswish.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: hswish_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_hswish.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_hswish.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: hswish_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_hswish.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: hswish_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_build_004, TestSize.Level0) +{ + std::vector m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_hswish.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: hswish_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_hswish.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: hswish_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_hswish.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: hswish_build_007 + * @tc.desc: Verify that the build function returns a failed message with a virtual parameter. + * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_build_007, TestSize.Level0) +{ + std::vector<uint32_t> m_params = {2}; + std::vector<int32_t> paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr<NNTensor> paramTensor; + paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_hswish.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: hswish_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_hswish.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_hswish.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_HSWISH; + auto returnValue = mindspore::lite::MindIR_Activation_GetActivationType(primitive.get()); + EXPECT_EQ(returnValue, activationType); +} + +/** + * @tc.name: hswish_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_getprimitive_002, TestSize.Level0) +{ + HswishBuilder hswish; + LiteGraphPrimitvePtr primitive = hswish.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/layernorm_builder_test.cpp b/test/unittest/ops/layernorm_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..254909ea0661a6809ec605f55fefc123e76cb218 --- /dev/null +++ b/test/unittest/ops/layernorm_builder_test.cpp @@ -0,0 +1,465 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/layernorm_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class LayerNormBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: +void SaveNormAixsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); +void SaveEpsilonTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); +void SaveParamAxisTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); +void SetInputTensor(std::shared_ptr inputTensor); + +public: + LayerNormBuilder m_layerNorm; + std::vector m_inputs {0, 1, 2}; + std::vector m_outputs {3}; + std::vector m_params {4, 5, 6}; + std::vector m_inputDimNorm {2, 3}; + std::vector m_inputDimEpsilon {3}; + std::vector m_inputDimParam {3}; + std::vector m_outputDim {3}; + std::vector m_paramDim {}; + std::shared_ptr m_inputTensor {}; +}; + +void LayerNormBuilderTest::SetUp() {} + +void LayerNormBuilderTest::TearDown() {} + +void LayerNormBuilderTest::SaveNormAixsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + int32_t* beginNormAxisValue = new (std::nothrow) int32_t(1); + EXPECT_NE(nullptr, beginNormAxisValue); + std::shared_ptr normAxisTensor = TransToNNTensor(dataType, dim, quantParam, type); + normAxisTensor->SetBuffer(beginNormAxisValue, sizeof(int32_t)); + m_allTensors.emplace_back(normAxisTensor); +} + +void LayerNormBuilderTest::SaveEpsilonTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + float* epsilonValue = new (std::nothrow) float(0.0f); + EXPECT_NE(nullptr, epsilonValue); + std::shared_ptr transposeBTensor = TransToNNTensor(dataType, dim, quantParam, type); + transposeBTensor->SetBuffer(epsilonValue, sizeof(float)); + m_allTensors.emplace_back(transposeBTensor); +} + +void LayerNormBuilderTest::SaveParamAxisTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + int32_t* beginNormParamValue = new (std::nothrow) int32_t(1); + EXPECT_NE(nullptr, beginNormParamValue); + std::shared_ptr paramAxisTensor = TransToNNTensor(dataType, dim, quantParam, type); + paramAxisTensor->SetBuffer(beginNormParamValue, sizeof(int32_t)); + m_allTensors.emplace_back(paramAxisTensor); +} + +void LayerNormBuilderTest::SetInputTensor(std::shared_ptr inputTensor) +{ + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_inputDimNorm, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_inputDimEpsilon, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_inputDimParam, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); +} + +/** + * @tc.name: layernorm_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_001, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: layernorm_build_002 + * @tc.desc: Verify that the build function returns a failed message with duplicate Build(). + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_002, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: layernorm_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2, 3}; + m_outputs = {4}; + m_params = {5, 6, 7}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: layernorm_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_004, TestSize.Level0) +{ + m_outputs = {3, 4}; + m_params = {5, 6, 7}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: layernorm_build_005 + * @tc.desc: Verify that the build function returns a failed message with null allTensor. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: layernorm_build_006 + * @tc.desc: Verify that the build function returns a failed message with invalided allTensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_006, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: layernorm_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid beginNormAxis's dataType. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_007, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr normAxisTensor; + normAxisTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + float beginNormAxisValue = 1e-7; + normAxisTensor->SetBuffer(&beginNormAxisValue, sizeof(beginNormAxisValue)); + m_allTensors.emplace_back(normAxisTensor); + + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + normAxisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: layernorm_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid beginNormAxis's dimension. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_008, TestSize.Level0) +{ + std::vector expectParamDim = {2}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr normAxisTensor; + normAxisTensor = TransToNNTensor(OH_NN_INT32, expectParamDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + int32_t beginNormAxisValue[2] = {1, 2}; + normAxisTensor->SetBuffer(beginNormAxisValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(normAxisTensor); + + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + normAxisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: layernorm_build_009 + * @tc.desc: Verify that the build function returns a failed message with invalid epsilon's dataType. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_009, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + std::shared_ptr epsilonTensor; + epsilonTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + int32_t epsilonValue = 1; + epsilonTensor->SetBuffer(&epsilonValue, sizeof(epsilonValue)); + m_allTensors.emplace_back(epsilonTensor); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + epsilonTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: layernorm_build_010 + * @tc.desc: Verify that the build function returns a failed message with invalid epsilon's dimension. 
+ * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_010, TestSize.Level0) +{ + std::vector expectParamDim = {2}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + std::shared_ptr epsilonTensor; + epsilonTensor = TransToNNTensor(OH_NN_FLOAT32, expectParamDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + float epsilonValue[2] = {1e-7, 1e-7}; + epsilonTensor->SetBuffer(epsilonValue, 2 * sizeof(float)); + m_allTensors.emplace_back(epsilonTensor); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + epsilonTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: layernorm_build_011 + * @tc.desc: Verify that the build function returns a failed message with invalid beginParamAxis's dataType. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_011, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + + std::shared_ptr paramAxisTensor; + paramAxisTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + float beginNormParamValue = 1; + paramAxisTensor->SetBuffer(&beginNormParamValue, sizeof(beginNormParamValue)); + m_allTensors.emplace_back(paramAxisTensor); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + paramAxisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: layernorm_build_012 + * @tc.desc: Verify that the build function returns a failed message with invalid beginParamAxis's dimension. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_012, TestSize.Level0) +{ + std::vector expectParamDim = {2}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + + std::shared_ptr paramAxisTensor; + paramAxisTensor = TransToNNTensor(OH_NN_INT32, expectParamDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + int32_t beginNormParamValue[2] = {1, 1}; + paramAxisTensor->SetBuffer(beginNormParamValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(paramAxisTensor); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + paramAxisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: layernorm_build_0013 + * @tc.desc: Verify that the build function returns a failed message with invalid param. 
+ * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_0013, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_BATCH_NORM_EPSILON); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: layernorm_build_014 + * @tc.desc: Verify that the build function returns a failed message without set buffer for normAxis. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_014, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + std::shared_ptr normAxisTensor; + normAxisTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + m_allTensors.emplace_back(normAxisTensor); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: layernorm_build_015 + * @tc.desc: Verify that the build function returns a failed message without set buffer for epsilon. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_015, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + std::shared_ptr epsilonTensor; + epsilonTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + m_allTensors.emplace_back(epsilonTensor); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: layernorm_build_016 + * @tc.desc: Verify that the build function returns a failed message without set buffer for paramsAxis. 
+ * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_016, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + + std::shared_ptr<NNTensor> paramAxisTensor; + paramAxisTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + m_allTensors.emplace_back(paramAxisTensor); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: layernorm_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_getprimitive_001, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + int32_t beginNormAxisValue = 1; + float epsilonValue = 0.0f; + int32_t beginNormParamValue = 1; + EXPECT_EQ(OH_NN_SUCCESS, m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_layerNorm.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + auto beginNormAxisReturn = mindspore::lite::MindIR_LayerNormFusion_GetBeginNormAxis(primitive.get()); + EXPECT_EQ(beginNormAxisReturn, beginNormAxisValue); + auto epsilonReturn = mindspore::lite::MindIR_LayerNormFusion_GetEpsilon(primitive.get()); + EXPECT_EQ(epsilonReturn, epsilonValue); + auto beginParamsAxisReturn = mindspore::lite::MindIR_LayerNormFusion_GetBeginParamsAxis(primitive.get()); + EXPECT_EQ(beginParamsAxisReturn, beginNormParamValue); +} + +/** + * @tc.name: layernorm_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_getprimitive_002, TestSize.Level0) +{ + LayerNormBuilder layerNorm; + LiteGraphPrimitvePtr primitive = layerNorm.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/lessequal_builder_test.cpp b/test/unittest/ops/lessequal_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..63898180bd520cb6bee5c4e543ce6249cd7bc131 --- /dev/null +++ b/test/unittest/ops/lessequal_builder_test.cpp @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/lessequal_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class LessEqualBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + LessEqualBuilder m_lessEqual; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {}; + std::vector m_inputDim {1, 2, 1, 1}; + std::vector m_outputDim {1, 2, 1, 1}; +}; + +void LessEqualBuilderTest::SetUp() {} + +void LessEqualBuilderTest::TearDown() {} + +/** + * @tc.name: lessequal_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_lessEqual.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: lessequal_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_lessEqual.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_lessEqual.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: lessequal_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2, 3}; + m_outputs = {4}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_lessEqual.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lessequal_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_build_004, TestSize.Level0) +{ + std::vector m_outputs = {2, 3, 4}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_lessEqual.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lessequal_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_lessEqual.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lessequal_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_lessEqual.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lessequal_build_007 + * @tc.desc: Verify that the build function returns a failed message with a virtual parameter. + * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_build_007, TestSize.Level0) +{ + std::vector m_params = {3}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + std::shared_ptr paramTensor; + paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_lessEqual.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lessequal_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_lessEqual.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_lessEqual.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: lessequal_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_getprimitive_002, TestSize.Level0) +{ + LessEqualBuilder lessEqual; + LiteGraphPrimitvePtr primitive = m_lessEqual.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/matmul_builder_test.cpp b/test/unittest/ops/matmul_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4af6c958dbaea8136887eed859cf1a4d2283faa9 --- /dev/null +++ b/test/unittest/ops/matmul_builder_test.cpp @@ -0,0 +1,483 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/matmul_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class MatMulBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveTransposeATensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveTransposeBTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveActivationTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetInputTensor(std::shared_ptr inputTensor); + +protected: + MatmulBuilder m_matmul; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3, 4, 5}; + std::vector m_inputXDim {1, 1, 3, 2}; + std::vector m_inputYDim {1, 1, 2, 3}; + std::vector m_outputDim {1, 1, 3, 3}; + std::vector m_paramDim {}; + std::shared_ptr m_inputTensor {}; +}; + +void MatMulBuilderTest::SetUp() {} + +void MatMulBuilderTest::TearDown() {} + +void MatMulBuilderTest::SaveTransposeATensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr transposeATensor = TransToNNTensor(dataType, dim, quantParam, type); + bool* transposeAValue = new (std::nothrow) bool(false); + EXPECT_NE(nullptr, transposeAValue); + transposeATensor->SetBuffer(transposeAValue, sizeof(bool)); + m_allTensors.emplace_back(transposeATensor); +} + +void MatMulBuilderTest::SaveTransposeBTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr transposeBTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool* transposeBValue = new (std::nothrow) bool(false); + EXPECT_NE(nullptr, transposeBValue); + transposeBTensor->SetBuffer(transposeBValue, sizeof(bool)); + m_allTensors.emplace_back(transposeBTensor); +} + +void MatMulBuilderTest::SaveActivationTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr activationTensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + activationTensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(activationTensor); +} + +void MatMulBuilderTest::SetInputTensor(std::shared_ptr inputTensor) +{ + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_inputXDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_inputYDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); +} +/** + * @tc.name: matmul_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_001, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: matmul_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_002, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + EXPECT_EQ(OH_NN_SUCCESS, m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: matmul_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + m_params = {4, 5, 6}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: matmul_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + m_params = {4, 5, 6}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: matmul_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: matmul_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
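+ * Note: Only SetInputTensor is called below, so output index 2 has no backing tensor in m_allTensors and Build is expected to return OH_NN_INVALID_PARAMETER.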
+ * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_006, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: matmul_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid transposeA's dataType. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_007, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + std::shared_ptr transposeATensor; + transposeATensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + int32_t transposeAValue = 1; + transposeATensor->SetBuffer(&transposeAValue, sizeof(transposeAValue)); + m_allTensors.emplace_back(transposeATensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + transposeATensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: matmul_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid transposeA's dimension. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_008, TestSize.Level0) +{ + std::vector expectParamDim = {2}; + + SetInputTensor(m_inputTensor); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + std::shared_ptr transposeATensor; + transposeATensor = TransToNNTensor(OH_NN_BOOL, expectParamDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + bool transposeAValue[2] = {false, false}; + transposeATensor->SetBuffer(transposeAValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(transposeATensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + transposeATensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: matmul_build_009 + * @tc.desc: Verify that the build function returns a failed message with invalid transposeB's dataType. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_009, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + std::shared_ptr transposeBTensor; + transposeBTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + int32_t transposeBValue = 1; + transposeBTensor->SetBuffer(&transposeBValue, sizeof(transposeBValue)); + m_allTensors.emplace_back(transposeBTensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + transposeBTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: matmul_build_010 + * @tc.desc: Verify that the build function returns a failed message with invalid transposeB's dimension. 
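+ * Note: transposeB is created with shape {2} holding two boolean values, while the builder expects a scalar parameter tensor.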
+ * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matMul_build_010, TestSize.Level0) +{ + std::vector expectParamDim = {2}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + std::shared_ptr transposeBTensor; + transposeBTensor = TransToNNTensor(OH_NN_BOOL, expectParamDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + bool transposeBValue[2] = {false, false}; + transposeBTensor->SetBuffer(transposeBValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(transposeBTensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + transposeBTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: matmul_build_011 + * @tc.desc: Verify that the build function returns a failed message with invalid activation's dataType. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matMul_build_011, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + + std::shared_ptr activationTensor; + activationTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + bool activationValue = false; + activationTensor->SetBuffer(&activationValue, sizeof(activationValue)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: matmul_build_012 + * @tc.desc: Verify that the build function returns a failed message with invalid activation's dimension. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_012, TestSize.Level0) +{ + std::vector expectParamDim = {2}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + + std::shared_ptr activationTensor; + activationTensor = TransToNNTensor(OH_NN_INT8, expectParamDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + int8_t activationValue[2] = {0, 1}; + activationTensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: matmul_build_013 + * @tc.desc: Verify that the build function returns a failed message with invalid activation's data. 
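+ * Note: The activation buffer is set to -1, which is outside the supported fused-activation values, so Build is expected to reject it.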
+ * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_013, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + + std::shared_ptr activationTensor; + activationTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + int8_t activationValue = -1; + activationTensor->SetBuffer(&activationValue, sizeof(activationValue)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: matmul_build_014 + * @tc.desc: Verify that the build function returns a failed message with passing invalid param. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_014, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: matmul_build_015 + * @tc.desc: Verify that the build function returns a failed message without set buffer for transposeA. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_015, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + std::shared_ptr transposeATensor; + transposeATensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + m_allTensors.emplace_back(transposeATensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: matmul_build_016 + * @tc.desc: Verify that the build function returns a failed message without set buffer for transposeB. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_016, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + std::shared_ptr transposeBTensor; + transposeBTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + m_allTensors.emplace_back(transposeBTensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: matmul_build_017 + * @tc.desc: Verify that the build function returns a failed message without set buffer for activation. 
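+ * Note: The activation tensor is appended to m_allTensors without calling SetBuffer, leaving its data pointer null.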
+ * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_017, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + + std::shared_ptr activationTensor; + activationTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: matmul_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_getprimitive_001, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + bool transposeAValue = false; + bool transposeBValue = false; + int8_t activationValue = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_matmul.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_MatMulFusion_GetTransposeA(primitive.get()); + EXPECT_EQ(returnValue, transposeAValue); + returnValue = mindspore::lite::MindIR_MatMulFusion_GetTransposeB(primitive.get()); + EXPECT_EQ(returnValue, transposeBValue); + returnValue = mindspore::lite::MindIR_MatMulFusion_GetActivationType(primitive.get()); + EXPECT_EQ(returnValue, activationValue); +} + +/** + * @tc.name: matmul_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_getprimitive_002, TestSize.Level0) +{ + MatmulBuilder matmul; + LiteGraphPrimitvePtr primitive = m_matmul.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/maximum_builder_test.cpp b/test/unittest/ops/maximum_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1e038afe020d5297ad171d54965dc4f79480da9f --- /dev/null +++ b/test/unittest/ops/maximum_builder_test.cpp @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/maximum_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class MaximumBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + MaximumBuilder m_maximum; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {}; + std::vector m_inputDim {1, 3, 1, 1}; + std::vector m_outputDim {1, 3, 1, 1}; +}; + +void MaximumBuilderTest::SetUp() {} + +void MaximumBuilderTest::TearDown() {} + + +/** + * @tc.name: maximum_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(MaximumBuilderTest, maximum_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_maximum.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: maximum_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(MaximumBuilderTest, maximum_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_maximum.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_maximum.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: maximum_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(MaximumBuilderTest, maximum_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_maximum.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: maximum_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(MaximumBuilderTest, maximum_build_004, TestSize.Level0) +{ + m_outputs = {2, 3, 4}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_maximum.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: maximum_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(MaximumBuilderTest, maximum_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_maximum.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: maximum_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
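+ * Note: Only the input tensors are saved below, so the output index passed to Build has no backing tensor in m_allTensors.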
+ * @tc.type: FUNC + */ +HWTEST_F(MaximumBuilderTest, maximum_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_maximum.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: maximum_build_007 + * @tc.desc: Verify that the build function returns a failed message with a virtual parameter. + * @tc.type: FUNC + */ +HWTEST_F(MaximumBuilderTest, maximum_build_007, TestSize.Level0) +{ + std::vector m_params = {3}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr paramTensor; + paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_maximum.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: maximum_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(MaximumBuilderTest, maximum_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_maximum.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_maximum.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: maximum_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(MaximumBuilderTest, maximum_getprimitive_002, TestSize.Level0) +{ + MaximumBuilder maximum; + LiteGraphPrimitvePtr primitive = m_maximum.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/maxpool_pad_test.cpp b/test/unittest/ops/maxpool_pad_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a331abf6304bc0e77cc149453308a47adab13dca --- /dev/null +++ b/test/unittest/ops/maxpool_pad_test.cpp @@ -0,0 +1,432 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/maxpool_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class MaxPoolPadBuilderTest : public OpsTest { +public: + void SetUp(); + void TearDown(); + + void SetPad(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetPadParam(); + +public: + MaxPoolBuilder m_builder; + std::vector m_inputs{0}; + std::vector m_outputs{1}; + std::vector m_params{2, 3, 4, 5}; + std::vector m_input_dim{1, 3, 3, 1}; + std::vector m_output_dim{1, 2, 2, 1}; + std::vector m_kenelsize_dim{2}; + std::vector m_stride_dim{2}; + std::vector m_pad_dim{4}; + std::vector m_param_dim{}; +}; + +void MaxPoolPadBuilderTest::SetUp() {} + +void MaxPoolPadBuilderTest::TearDown() {} + +void MaxPoolPadBuilderTest::SetPad(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + int32_t padNum{4}; + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* padValue = new (std::nothrow) int64_t[padNum]{0, 0, 0, 0}; + EXPECT_NE(nullptr, padValue); + + tensor->SetBuffer(padValue, sizeof(int64_t) * padNum); + m_allTensors.emplace_back(tensor); +} + +void MaxPoolPadBuilderTest::SetPadParam() +{ + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); +} + +/** + * @tc.name: maxpool_build_pad_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_003, TestSize.Level1) +{ + m_inputs = {}; + m_outputs = {0}; + m_params = {1, 2, 3, 4}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ 
+HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_004, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {}; + m_params = {1, 2, 3, 4}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_005, TestSize.Level1) +{ + m_inputs = {6}; + m_outputs = {1}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_006, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {6}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_007 + * @tc.desc: Verify the invalid kernelSize of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_007, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + int32_t kernelsNum{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_kenelsize_dim, nullptr, + OH_NN_MAX_POOL_KERNEL_SIZE); + int32_t* valueKernelSize = new (std::nothrow) int32_t[kernelsNum]{1, 1}; + EXPECT_NE(nullptr, valueKernelSize); + + tensor->SetBuffer(valueKernelSize, sizeof(int32_t) * kernelsNum); + m_allTensors.emplace_back(tensor); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + m_paramsIndex = m_params; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_008 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_008, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + m_paramsIndex = m_params; + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + int32_t strideNum{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + int32_t* strideValue = new (std::nothrow) int32_t[strideNum]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, sizeof(int32_t) * strideNum); + m_allTensors.emplace_back(tensor); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + SetActivation(OH_NN_INT8, 
m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_009 + * @tc.desc: Verify the invalid pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + int32_t padNum{4}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + int32_t* padValue = new (std::nothrow) int32_t[padNum]{0, 0, 0, 0}; + EXPECT_NE(nullptr, padValue); + + tensor->SetBuffer(padValue, sizeof(int32_t) * padNum); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + + +/** + * @tc.name: maxpool_build_pad_010 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_MAX_POOL_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_011 + * @tc.desc: Verify the activation scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_011, TestSize.Level1) +{ + m_param_dim = {2}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + int8_t* activationValue = new (std::nothrow) int8_t[2]{1, 2}; + EXPECT_NE(nullptr, activationValue); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_MAX_POOL_ACTIVATION_TYPE); + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_012 + * @tc.desc: Verify the maxpool without set kernelsize of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_012, TestSize.Level1) +{ + m_paramsIndex = m_params; + 
SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_kenelsize_dim, nullptr, + OH_NN_MAX_POOL_KERNEL_SIZE); + m_allTensors.emplace_back(tensor); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_013 + * @tc.desc: Verify the maxpool without set stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_013, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + m_allTensors.emplace_back(tensor); + + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_014 + * @tc.desc: Verify the maxpool without set pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_014, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + m_allTensors.emplace_back(tensor); + + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_015 + * @tc.desc: Verify the maxpool without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_015, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_MAX_POOL_ACTIVATION_TYPE); + m_allTensors.emplace_back(tensor); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_getprimitive_pad_001 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_getprimitive_pad_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); 
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetPadParam();
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
+    EXPECT_NE(expectPrimitive, primitive);
+
+    std::vector<int64_t> expectKernelSize = mindspore::lite::MindIR_MaxPoolFusion_GetKernelSize(primitive.get());
+    std::vector<int64_t> kernelSizeValueTest{1, 1};
+    EXPECT_EQ(kernelSizeValueTest, expectKernelSize);
+
+    std::vector<int64_t> expectStrides = mindspore::lite::MindIR_MaxPoolFusion_GetStrides(primitive.get());
+    std::vector<int64_t> strideValueTest{1, 1};
+    EXPECT_EQ(strideValueTest, expectStrides);
+
+    std::vector<int64_t> expectPadValue = mindspore::lite::MindIR_MaxPoolFusion_GetPad(primitive.get());
+    std::vector<int64_t> padValueValueTest{0, 0, 0, 0};
+    EXPECT_EQ(padValueValueTest, expectPadValue);
+
+    int8_t activationValue = 0;
+    int expectActivation = mindspore::lite::MindIR_MaxPoolFusion_GetActivationType(primitive.get());
+    EXPECT_EQ(activationValue, expectActivation);
+}
+
+/**
+ * @tc.name: maxpool_getprimitive_pad_002
+ * @tc.desc: Verify the behavior of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(MaxPoolPadBuilderTest, maxpool_getprimitive_pad_002, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetPadParam();
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
+    EXPECT_EQ(expectPrimitive, primitive);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/test/unittest/ops/maxpool_padmode_test.cpp b/test/unittest/ops/maxpool_padmode_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..799edca0ef0d8be2e36535b50293a8707bd41b80
--- /dev/null
+++ b/test/unittest/ops/maxpool_padmode_test.cpp
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "frameworks/native/ops/maxpool_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class MaxPoolBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetPadMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetParam(); + +public: + MaxPoolBuilder m_builder; + std::vector m_inputs{0}; + std::vector m_outputs{1}; + std::vector m_params{2, 3, 4, 5}; + std::vector m_input_dim{1, 3, 3, 1}; + std::vector m_output_dim{1, 2, 2, 1}; + std::vector m_kenelsize_dim{2}; + std::vector m_stride_dim{2}; + std::vector m_param_dim{}; +}; + +void MaxPoolBuilderTest::SetUp() {} + +void MaxPoolBuilderTest::TearDown() {} + +void MaxPoolBuilderTest::SetPadMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* padModeValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, padModeValue); + tensor->SetBuffer(padModeValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); +} + +void MaxPoolBuilderTest::SetParam() +{ + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); +} + +/** + * @tc.name: maxpool_build_pad_mode_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_mode_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_mode_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_003, TestSize.Level1) +{ + m_inputs = {}; + m_outputs = {0}; + m_params = {1, 2, 3, 4}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_mode_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_004, 
TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {}; + m_params = {1, 2, 3, 4}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_mode_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_005, TestSize.Level1) +{ + m_inputs = {6}; + m_outputs = {1}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_mode_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_006, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {6}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_mode_007 + * @tc.desc: Verify the invalid kernelSize of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_007, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + int32_t kernelsNum{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_kenelsize_dim, nullptr, + OH_NN_MAX_POOL_KERNEL_SIZE); + int32_t* kernelSizeValue = new (std::nothrow) int32_t[kernelsNum]{1, 1}; + EXPECT_NE(nullptr, kernelSizeValue); + tensor->SetBuffer(kernelSizeValue, sizeof(int32_t) * kernelsNum); + m_allTensors.emplace_back(tensor); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_mode_008 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + int32_t strideNum{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + int32_t* strideValue = new (std::nothrow) int32_t[strideNum]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, sizeof(int32_t) * strideNum); + m_allTensors.emplace_back(tensor); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, 
OH_NN_MAX_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_mode_009 + * @tc.desc: Verify the invalid padmode of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + int32_t *padValueTest = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, padValueTest); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD); + tensor->SetBuffer(padValueTest, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + + +/** + * @tc.name: maxpool_build_pad_mode_010 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_MAX_POOL_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_mode_011 + * @tc.desc: Verify the scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_011, TestSize.Level1) +{ + m_param_dim = {2}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE); + int8_t* activationValue = new (std::nothrow) int8_t[2]{1, 2}; + EXPECT_NE(nullptr, activationValue); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_MAX_POOL_ACTIVATION_TYPE); + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_getprimitive_pad_mode_001 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_getprimitive_pad_mode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, 
OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + std::vector returnKernelSize = mindspore::lite::MindIR_MaxPoolFusion_GetKernelSize(primitive.get()); + std::vector kernelSizeValueTest{1, 1}; + EXPECT_EQ(kernelSizeValueTest, returnKernelSize); + + std::vector returnStrides = mindspore::lite::MindIR_MaxPoolFusion_GetStrides(primitive.get()); + std::vector strideValueTest{1, 1}; + int returnPadMode = mindspore::lite::MindIR_MaxPoolFusion_GetPadMode(primitive.get()); + EXPECT_EQ(1, returnPadMode); + + int returnActivation = mindspore::lite::MindIR_MaxPoolFusion_GetActivationType(primitive.get()); + EXPECT_EQ(0, returnActivation); +} + +/** + * @tc.name: maxpool_getprimitive_pad_mode_002 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_getprimitive_pad_mode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParam(); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/mul_builder_test.cpp b/test/unittest/ops/mul_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..de6c0e1418db4b6327aa63a0f8bf5c374e72496d --- /dev/null +++ b/test/unittest/ops/mul_builder_test.cpp @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/mul_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class MulBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + MulBuilder m_mul; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3}; + std::vector m_inputDim {1, 2, 2, 1}; + std::vector m_outputDim {1, 2, 2, 1}; + std::vector m_paramDim {}; +}; + +void MulBuilderTest::SetUp() {} + +void MulBuilderTest::TearDown() {} + +void MulBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr activationTensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + activationTensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(activationTensor); +} + +/** + * @tc.name: mul_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: mul_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + EXPECT_EQ(OH_NN_SUCCESS, m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: mul_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: mul_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
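+ * Note: Two output indices {2, 3} are supplied although Mul produces a single output tensor, so Build is expected to return OH_NN_INVALID_PARAMETER.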
+ * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: mul_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: mul_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: mul_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid activation's dataType. + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_007, TestSize.Level0) +{ + m_params = {3}; + std::vector m_paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr activationTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_MUL_ACTIVATION_TYPE); + float activationValue = 1e-7; + activationTensor->SetBuffer(&activationValue, sizeof(activationValue)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: mul_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid activation's dimension. + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_008, TestSize.Level0) +{ + m_paramDim = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr activationTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, + nullptr, OH_NN_MUL_ACTIVATION_TYPE); + int8_t activationValue[2] = {0, 1}; + activationTensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: mul_build_009 + * @tc.desc: Verify that the build function returns a failed message with invalid activation's data. 
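+ * Note: The activation buffer holds -1, which is outside the supported fused-activation values, so Build is expected to fail.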
+ * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr<NNTensor> activationTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, + nullptr, OH_NN_MUL_ACTIVATION_TYPE); + int8_t activationValue = -1; + activationTensor->SetBuffer(&activationValue, sizeof(activationValue)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: mul_build_010 + * @tc.desc: Verify that the build function returns a failed message with passing invalid param. + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_010, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: mul_build_011 + * @tc.desc: Verify that the build function returns a failed message without set buffer for activation. + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_011, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr<NNTensor> activationTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, + nullptr, OH_NN_MUL_ACTIVATION_TYPE); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: mul_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + int8_t activationValue = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_mul.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_MulFusion_GetActivationType(primitive.get()); + EXPECT_EQ(returnValue, activationValue); +} + +/** + * @tc.name: mul_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_getprimitive_002, TestSize.Level0) +{ + MulBuilder mul; + LiteGraphPrimitvePtr primitive = mul.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/onehot_builder_test.cpp b/test/unittest/ops/onehot_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..16832ffddc14a5083144c22c7dfa6953d6ee70bf --- /dev/null +++ b/test/unittest/ops/onehot_builder_test.cpp @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/onehot_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class OneHotBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveParamsTensor(OH_NN_DataType dataType, + const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + OnehotBuilder m_oneHot; + std::vector<uint32_t> m_inputs {0, 1, 2, 3}; + std::vector<uint32_t> m_outputs {4}; + std::vector<uint32_t> m_params {5}; + std::vector<int32_t> m_inputDim {3}; + std::vector<int32_t> m_outputDim {3, 3}; + std::vector<int32_t> m_paramDim {}; +}; + +void OneHotBuilderTest::SetUp() {} + +void OneHotBuilderTest::TearDown() {} + +void OneHotBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, + const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr<NNTensor> axisTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t(-1); + EXPECT_NE(nullptr, axisValue); + axisTensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); +} + +/** + * @tc.name: onehot_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ONE_HOT_AXIS); + + OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: onehot_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
+ * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ONE_HOT_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: onehot_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2, 3, 4}; + m_outputs = {5}; + m_params = {6}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ONE_HOT_AXIS); + + OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: onehot_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_build_004, TestSize.Level0) +{ + m_outputs = {4, 5}; + m_params = {6}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ONE_HOT_AXIS); + + OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: onehot_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: onehot_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: onehot_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid axis's dataType. + * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_ONE_HOT_AXIS); + float axisValue = 1e-7; + axisTensor->SetBuffer(&axisValue, sizeof(axisValue)); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + axisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: onehot_build_008 + * @tc.desc: Verify that the build function returns a failed message with passing invalid param. 
+ * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_build_008, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: onehot_build_009 + * @tc.desc: Verify that the build function returns a failed message without set buffer for axis. + * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_ONE_HOT_AXIS); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: onehot_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ONE_HOT_AXIS); + + int64_t axisValue = -1; + EXPECT_EQ(OH_NN_SUCCESS, m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_oneHot.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_OneHot_GetAxis(primitive.get()); + EXPECT_EQ(returnValue, axisValue); +} + +/** + * @tc.name: onehot_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_getprimitive_002, TestSize.Level0) +{ + OnehotBuilder oneHot; + LiteGraphPrimitvePtr primitive = m_oneHot.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/ops_test.cpp b/test/unittest/ops/ops_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2a800a003db694f35f844bf464d3270f8895db8a --- /dev/null +++ b/test/unittest/ops/ops_test.cpp @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops_test.h" + +using namespace OHOS::NeuralNetworkRuntime::Ops; +using namespace std; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +void OpsTest::SaveInputTensor(const std::vector& inputsIndex, OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam) +{ + m_inputsIndex = inputsIndex; + for (size_t i = 0; i < inputsIndex.size(); ++i) { + std::shared_ptr inputTensor; + inputTensor = TransToNNTensor(dataType, dim, quantParam, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + } +} + +void OpsTest::SaveOutputTensor(const std::vector& outputsIndex, OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam) +{ + m_outputsIndex = outputsIndex; + for (size_t i = 0; i < outputsIndex.size(); ++i) { + std::shared_ptr outputTensor; + outputTensor = TransToNNTensor(dataType, dim, quantParam, OH_NN_TENSOR); + m_allTensors.emplace_back(outputTensor); + } +} + +void OpsTest::SetKernelSize(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + int32_t kernelsNum{2}; + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* kernelSizeValue = new (std::nothrow) int64_t[kernelsNum]{1, 1}; + EXPECT_NE(nullptr, kernelSizeValue); + tensor->SetBuffer(kernelSizeValue, sizeof(int64_t) * kernelsNum); + m_allTensors.emplace_back(tensor); +} + +void OpsTest::SetStride(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + int32_t strideNum{2}; + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* strideValue = new (std::nothrow) int64_t[strideNum]{1, 1}; + EXPECT_NE(nullptr, strideValue); + tensor->SetBuffer(strideValue, sizeof(int64_t) * strideNum); + m_allTensors.emplace_back(tensor); +} + +void OpsTest::SetActivation(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); +} + +void OpsTest::SetDilation(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + int32_t dilationNum = 2; + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* dilationValue = new (std::nothrow) int64_t[2]{1, 1}; + EXPECT_NE(nullptr, dilationValue); + tensor->SetBuffer(dilationValue, dilationNum * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); +} + +void OpsTest::SetGroup(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* groupValue = new (std::nothrow) int64_t(0); + EXPECT_NE(nullptr, groupValue); + tensor->SetBuffer(groupValue, sizeof(int64_t)); + m_allTensors.emplace_back(tensor); +} + +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/ops_test.h b/test/unittest/ops/ops_test.h new file mode 100644 index 0000000000000000000000000000000000000000..a35221805f0e73f1a5367b47af5e1e9628aa729b --- /dev/null +++ b/test/unittest/ops/ops_test.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_OPS_TEST_H +#define NEURAL_NETWORK_RUNTIME_OPS_TEST_H + +#include <gtest/gtest.h> + +#include "mindir.h" + +#include "frameworks/native/nn_tensor.h" +#include "test/unittest/common/base_test.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class OpsTest : public BaseTest { +public: + OpsTest() = default; + virtual void SaveInputTensor(const std::vector<uint32_t>& inputsIndex, OH_NN_DataType dataType, + const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam); + virtual void SaveOutputTensor(const std::vector<uint32_t>& outputsIndex, OH_NN_DataType dataType, + const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam); + virtual void InitTensor(const std::vector<uint32_t>& inputsIndex, const std::vector<uint32_t>& outputsIndex) {}; + + void SetKernelSize(OH_NN_DataType dataType, + const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetStride(OH_NN_DataType dataType, + const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetActivation(OH_NN_DataType dataType, + const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetDilation(OH_NN_DataType dataType, + const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetGroup(OH_NN_DataType dataType, + const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +public: + std::vector<uint32_t> m_inputsIndex {}; + std::vector<uint32_t> m_outputsIndex {}; + std::vector<uint32_t> m_paramsIndex {}; + std::vector<std::shared_ptr<NNTensor>> m_allTensors; +}; +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_OPS_TEST_H diff --git a/test/unittest/ops/pad_builder_test.cpp b/test/unittest/ops/pad_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..402d54500195d2534e9f1b31efa84c22b9bb739b --- /dev/null +++ b/test/unittest/ops/pad_builder_test.cpp @@ -0,0 +1,261 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/pad_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class PadBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + PadBuilder m_pad; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3}; + std::vector m_inputDim {1, 1, 2, 3}; + std::vector m_outputDim {1, 2, 7, 7}; + std::vector m_paramDim {}; +}; + +void PadBuilderTest::SetUp() {} + +void PadBuilderTest::TearDown() {} + +void PadBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr constantValueTensor = TransToNNTensor(dataType, dim, quantParam, type); + float* constantValue = new (std::nothrow) float(2.0); + EXPECT_NE(nullptr, constantValue); + constantValueTensor->SetBuffer(constantValue, sizeof(float)); + m_allTensors.emplace_back(constantValueTensor); +} + +/** + * @tc.name: pad_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: pad_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + + EXPECT_EQ(OH_NN_SUCCESS, m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: pad_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pad_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
+ * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pad_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pad_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pad_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid constant's dataType. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr constantValueTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, + nullptr, OH_NN_PAD_CONSTANT_VALUE); + int32_t constantValue = 0; + constantValueTensor->SetBuffer(&constantValue, sizeof(constantValue)); + m_allTensors.emplace_back(constantValueTensor); + + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + constantValueTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: pad_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid constant's dimension. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_008, TestSize.Level0) +{ + m_paramDim = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr constantValueTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_PAD_CONSTANT_VALUE); + float constantValue[2] = {2.0, 2.0}; + constantValueTensor->SetBuffer(constantValue, 2 * sizeof(float)); + m_allTensors.emplace_back(constantValueTensor); + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + constantValueTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: pad_build_009 + * @tc.desc: Verify that the build function returns a failed message with passing invalid param. 
+ * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_ONE_HOT_AXIS); + + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pad_build_010 + * @tc.desc: Verify that the build function returns a failed message without set buffer for constantValue. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_010, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr constantValueTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_PAD_CONSTANT_VALUE); + m_allTensors.emplace_back(constantValueTensor); + + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pad_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + + float constantValue = 2.0; + EXPECT_EQ(OH_NN_SUCCESS, m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_pad.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_PadFusion_GetConstantValue(primitive.get()); + EXPECT_EQ(returnValue, constantValue); +} + +/** + * @tc.name: pad_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_getprimitive_002, TestSize.Level0) +{ + PadBuilder pad; + LiteGraphPrimitvePtr primitive = m_pad.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/pow_builder_test.cpp b/test/unittest/ops/pow_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..099e2bf8838565a7a996a397f79cdcf334569fbc --- /dev/null +++ b/test/unittest/ops/pow_builder_test.cpp @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/pow_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class PowBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + PowBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void PowBuilderTest::SetUp() {} + +void PowBuilderTest::TearDown() {} + +/** + * @tc.name: pow_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: pow_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: pow_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pow_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pow_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pow_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pow_build_007 + * @tc.desc: Verify that the build 
function return a failed message with a virtual parameter + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_007, TestSize.Level0) +{ + std::vector paramsIndex = {3}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + std::shared_ptr powTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(powTensor); + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pow_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: pow_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr powPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(powPrimitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/prelu_builder_test.cpp b/test/unittest/ops/prelu_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..030cd2bdeb8b65d2f17243a9e184e711b7074190 --- /dev/null +++ b/test/unittest/ops/prelu_builder_test.cpp @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/prelu_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class PReluBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + PReluBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_dim {1, 2, 2, 2}; +}; + +void PReluBuilderTest::SetUp() {} + +void PReluBuilderTest::TearDown() {} + +/** + * @tc.name: prelu_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: prelu_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: prelu_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: prelu_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: prelu_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: prelu_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: prelu_build_007 + * @tc.desc: Verify that the build function return a successful message with a virtual parameter + * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_build_007, TestSize.Level0) +{ + std::vector paramsIndex = {3}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + std::shared_ptr preluTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(preluTensor); + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: prelu_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: prelu_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr preluPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(preluPrimitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/quant_dtype_cast_builder_test.cpp b/test/unittest/ops/quant_dtype_cast_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..69a2d5d7d2369b4a606be399b8e5f7efe8e10c79 --- /dev/null +++ b/test/unittest/ops/quant_dtype_cast_builder_test.cpp @@ -0,0 +1,307 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/quant_dtype_cast_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class QuantDTypeCastBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveSrcTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveDstTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + QuantDTypeCastBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_params {2, 3}; + std::vector m_dim {3, 3}; + std::vector m_paramDim {}; +}; + +void QuantDTypeCastBuilderTest::SetUp() {} + +void QuantDTypeCastBuilderTest::TearDown() {} + +void QuantDTypeCastBuilderTest::SaveSrcTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr srcTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t *srcValue = new (std::nothrow) int64_t(1); + EXPECT_NE(nullptr, srcValue); + srcTensor->SetBuffer(srcValue, sizeof(int64_t)); + m_allTensors.emplace_back(srcTensor); +} + +void QuantDTypeCastBuilderTest::SaveDstTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr dstTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t *dstValue = new (std::nothrow) int64_t(1); + EXPECT_NE(nullptr, dstValue); + dstTensor->SetBuffer(dstValue, sizeof(int64_t)); + m_allTensors.emplace_back(dstTensor); +} + +/** + * @tc.name: quantdtypecast_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: quantdtypecast_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: quantdtypecast_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + 
m_params = {3, 4}; + + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: quantdtypecast_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_004, TestSize.Level0) +{ + m_outputs = {1, 2}; + m_params = {3, 4}; + + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: quantdtypecast_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: quantdtypecast_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: quantdtypecast_build_007 + * @tc.desc: Verify that the build function return a failed message with invalided src's dataType + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + + std::shared_ptr srcTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, + nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + int32_t srcValue = 1; + srcTensor->SetBuffer(&srcValue, sizeof(srcValue)); + m_allTensors.emplace_back(srcTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + srcTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: quantdtypecast_build_008 + * @tc.desc: Verify that the build function return a failed message with invalided dst's dataType + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_008, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + + std::shared_ptr dstTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, + nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + int32_t dstValue = 1; + dstTensor->SetBuffer(&dstValue, sizeof(dstValue)); + m_allTensors.emplace_back(dstTensor); + + OH_NN_ReturnCode ret = 
m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + dstTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: quantdtypecast_build_009 + * @tc.desc: Verify that the build function return a failed message with invalided parameter + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: quantdtypecast_build_010 + * @tc.desc: Verify that the build function return a failed message with empty src's buffer + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_010, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + + std::shared_ptr srcTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + m_allTensors.emplace_back(srcTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + srcTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: quantdtypecast_build_011 + * @tc.desc: Verify that the build function return a failed message with empty dst's buffer + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_011, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + + std::shared_ptr dstTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + m_allTensors.emplace_back(dstTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + dstTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: quantdtypecast_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: quantdtypecast_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + 
EXPECT_NE(primitive, expectPrimitive); + + int64_t srcValue = 1; + int64_t dstValue = 1; + auto srcReturn = mindspore::lite::MindIR_QuantDTypeCast_GetSrcT(primitive.get()); + EXPECT_EQ(srcReturn, srcValue); + auto dstReturn = mindspore::lite::MindIR_QuantDTypeCast_GetDstT(primitive.get()); + EXPECT_EQ(dstReturn, dstValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/reduce_all_builder_test.cpp b/test/unittest/ops/reduce_all_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ff661f5da412919e615960ed5b5d48eb672fe90d --- /dev/null +++ b/test/unittest/ops/reduce_all_builder_test.cpp @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/reduceall_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ReduceAllBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + ReduceAllBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3}; + std::vector m_inputDim {1, 1, 2, 2}; + std::vector m_outputDim {1, 1, 1, 2}; + std::vector m_paramDim {1}; +}; + +void ReduceAllBuilderTest::SetUp() {} + +void ReduceAllBuilderTest::TearDown() {} + +void ReduceAllBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr keepDimsTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool *keepDimsValue = new (std::nothrow) bool(true); + EXPECT_NE(nullptr, keepDimsValue); + keepDimsTensor->SetBuffer(keepDimsValue, sizeof(bool)); + m_allTensors.emplace_back(keepDimsTensor); +} + +/** + * @tc.name: reduceall_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: reduceall_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + 
SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: reduceall_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceall_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceall_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceall_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceall_build_007 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dataType + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + int64_t keepDimsValue = 1; + keepDimsTensor->SetBuffer(&keepDimsValue, sizeof(keepDimsValue)); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceall_build_008 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_008, TestSize.Level0) +{ + m_paramDim = {1, 
2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, + nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + bool keepDimsValue[2] = {true, true}; + keepDimsTensor->SetBuffer(keepDimsValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceall_build_009 + * @tc.desc: Verify that the build function return a failed message with invalided parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceall_build_010 + * @tc.desc: Verify that the build function return a failed message with empty keepdims's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_010, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceall_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: reduceall_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + + bool keepDimsValue = true; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr reduceallPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(reduceallPrimitive, expectPrimitive); + auto returnValue = mindspore::lite::MindIR_ReduceFusion_GetKeepDims(reduceallPrimitive.get()); + EXPECT_EQ(returnValue, keepDimsValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/reduce_mean_builder_test.cpp b/test/unittest/ops/reduce_mean_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3f3f657e874c93f27dfe1c7e6697c86925204dda --- /dev/null +++ 
b/test/unittest/ops/reduce_mean_builder_test.cpp @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/reducemean_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ReduceMeanBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + ReduceMeanBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3}; + std::vector m_inputDim {3, 5, 6, 4}; + std::vector m_outputDim {3, 5, 6, 1}; + std::vector m_paramDim {1}; +}; + +void ReduceMeanBuilderTest::SetUp() {} + +void ReduceMeanBuilderTest::TearDown() {} + +void ReduceMeanBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr keepDimsTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool *keepDimsValue = new (std::nothrow) bool(true); + EXPECT_NE(nullptr, keepDimsValue); + keepDimsTensor->SetBuffer(keepDimsValue, sizeof(bool)); + m_allTensors.emplace_back(keepDimsTensor); +} + +/** + * @tc.name: reducemean_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: reducemean_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: reducemean_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = 
{3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemean_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemean_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemean_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemean_build_007 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dataType + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + int64_t keepDimsValue = 1; + keepDimsTensor->SetBuffer(&keepDimsValue, sizeof(keepDimsValue)); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemean_build_008 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_008, TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + bool keepDimsValue[2] = {true, true}; + keepDimsTensor->SetBuffer(keepDimsValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemean_build_009 + * @tc.desc: Verify that 
the build function return a failed message with invalided parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemean_build_010 + * @tc.desc: Verify that the build function return a failed message with empty keepdims's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_010, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemean_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: reducemean_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + + bool keepDimsValue = true; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr reducemeanPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(reducemeanPrimitive, expectPrimitive); + auto returnValue = mindspore::lite::MindIR_ReduceFusion_GetKeepDims(reducemeanPrimitive.get()); + EXPECT_EQ(returnValue, keepDimsValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/reduce_prod_builder_test.cpp b/test/unittest/ops/reduce_prod_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5b160a811af9102f6cda4a4dbb85acae0a07d7ba --- /dev/null +++ b/test/unittest/ops/reduce_prod_builder_test.cpp @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/reduceprod_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ReduceProdBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + ReduceProdBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3}; + std::vector m_inputDim {3, 5, 6, 4}; + std::vector m_outputDim {3, 5, 6, 1}; + std::vector m_paramDim {1}; +}; + +void ReduceProdBuilderTest::SetUp() {} + +void ReduceProdBuilderTest::TearDown() {} + +void ReduceProdBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr keepDimsTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool *keepDimsValue = new (std::nothrow) bool(true); + EXPECT_NE(nullptr, keepDimsValue); + keepDimsTensor->SetBuffer(keepDimsValue, sizeof(bool)); + m_allTensors.emplace_back(keepDimsTensor); +} + +/** + * @tc.name: reduceprod_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: reduceprod_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: reduceprod_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceprod_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ 
+HWTEST_F(ReduceProdBuilderTest, reduceprod_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceprod_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceprod_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceprod_build_007 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dataType + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + int64_t keepDimsValue = 1; + keepDimsTensor->SetBuffer(&keepDimsValue, sizeof(keepDimsValue)); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceprod_build_008 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_008, TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + bool keepDimsValue[2] = {true, true}; + keepDimsTensor->SetBuffer(keepDimsValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceprod_build_009 + * @tc.desc: Verify that the build function return a failed message with invalided parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: 
reduceprod_build_010 + * @tc.desc: Verify that the build function return a failed message with empty keepdims's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_010, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceprod_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: reduceprod_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + + bool keepDimsValue = true; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr reduceprodPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(reduceprodPrimitive, expectPrimitive); + auto returnValue = mindspore::lite::MindIR_ReduceFusion_GetKeepDims(reduceprodPrimitive.get()); + EXPECT_EQ(returnValue, keepDimsValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/relu6_builder_test.cpp b/test/unittest/ops/relu6_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4ce5c1e8865cdf58c3cd21a73f58a3ee832538fb --- /dev/null +++ b/test/unittest/ops/relu6_builder_test.cpp @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/relu6_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class Relu6BuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + Relu6Builder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 5, 1, 1}; +}; + +void Relu6BuilderTest::SetUp() {} + +void Relu6BuilderTest::TearDown() {} + +/** + * @tc.name: relu6_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(Relu6BuilderTest, relu6_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: relu6_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(Relu6BuilderTest, relu6_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: relu6_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(Relu6BuilderTest, relu6_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu6_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(Relu6BuilderTest, relu6_build_004, TestSize.Level0) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu6_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(Relu6BuilderTest, relu6_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu6_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(Relu6BuilderTest, relu6_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu6_build_007 + * @tc.desc: Verify that 
the build function returns a failed message with a virtual parameter + * @tc.type: FUNC + */ +HWTEST_F(Relu6BuilderTest, relu6_build_007, TestSize.Level0) +{ + std::vector<uint32_t> paramsIndex = {2}; + std::vector<int32_t> paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + std::shared_ptr<NNTensor> relu6Tensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(relu6Tensor); + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu6_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function returns nullptr + * @tc.type: FUNC + */ +HWTEST_F(Relu6BuilderTest, relu6_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: relu6_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(Relu6BuilderTest, relu6_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr relu6Primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(relu6Primitive, expectPrimitive); + + mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_RELU6; + auto returnValue = mindspore::lite::MindIR_Activation_GetActivationType(relu6Primitive.get()); + EXPECT_EQ(returnValue, activationType); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/relu_builder_test.cpp b/test/unittest/ops/relu_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5fabb49fdf957bde2de79f75d7ab704a0dcd4b3e --- /dev/null +++ b/test/unittest/ops/relu_builder_test.cpp @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/relu_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ReluBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + ReluBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 5, 1, 1}; +}; + +void ReluBuilderTest::SetUp() {} + +void ReluBuilderTest::TearDown() {} + +/** + * @tc.name: relu_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReluBuilderTest, relu_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: relu_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReluBuilderTest, relu_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: relu_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReluBuilderTest, relu_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReluBuilderTest, relu_build_004, TestSize.Level0) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ReluBuilderTest, relu_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(ReluBuilderTest, relu_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu_build_007 + * @tc.desc: Verify that the build function 
returns a failed message with a virtual parameter + * @tc.type: FUNC + */ +HWTEST_F(ReluBuilderTest, relu_build_007, TestSize.Level0) +{ + std::vector<uint32_t> paramsIndex = {2}; + std::vector<int32_t> paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + std::shared_ptr<NNTensor> reluTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(reluTensor); + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function returns nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReluBuilderTest, relu_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: relu_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(ReluBuilderTest, relu_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr reluPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(reluPrimitive, expectPrimitive); + + mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_RELU; + auto returnValue = mindspore::lite::MindIR_Activation_GetActivationType(reluPrimitive.get()); + EXPECT_EQ(returnValue, activationType); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/reshape_builder_test.cpp b/test/unittest/ops/reshape_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6443b49d14f759dd75eeeadc86e8599eda589696 --- /dev/null +++ b/test/unittest/ops/reshape_builder_test.cpp @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/reshape_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ReshapeBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + ReshapeBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_inputDim {1, 2, 4, 1}; + std::vector m_outputDim {1, 4, 2, 4}; +}; + +void ReshapeBuilderTest::SetUp() {} + +void ReshapeBuilderTest::TearDown() {} + +/** + * @tc.name: reshape_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: reshape_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: reshape_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reshape_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reshape_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reshape_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = 
m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reshape_build_007 + * @tc.desc: Verify that the build function return a failed message with a virtual parameter + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_build_007, TestSize.Level0) +{ + std::vector paramsIndex = {3}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reshape_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: reshape_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr reshapePrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(reshapePrimitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/resize_bilinear_builder_test.cpp b/test/unittest/ops/resize_bilinear_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0ada3d92d71e78ca41e7a3e7dd1e9d8ba71f8b73 --- /dev/null +++ b/test/unittest/ops/resize_bilinear_builder_test.cpp @@ -0,0 +1,664 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/resize_bilinear_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ResizeBilinearBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveHeightTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveWidthTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveRatioTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveModeTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveOutsideTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetParameterTensor(); + +protected: + ResizeBilinearBuilder m_builder; + + std::shared_ptr heightTensor {nullptr}; + std::shared_ptr widthTensor {nullptr}; + std::shared_ptr ratioTensor {nullptr}; + std::shared_ptr modeTensor {nullptr}; + std::shared_ptr outsideTensor {nullptr}; + + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_params {2, 3, 4, 5, 6}; + std::vector m_dim {1, 2, 2, 2}; + std::vector m_paramDim {}; +}; + +void ResizeBilinearBuilderTest::SetUp() {} + +void ResizeBilinearBuilderTest::TearDown() {} + +void ResizeBilinearBuilderTest::SaveHeightTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + heightTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t *heightValue = new (std::nothrow) int64_t(1); + EXPECT_NE(nullptr, heightValue); + heightTensor->SetBuffer(heightValue, sizeof(int64_t)); + m_allTensors.emplace_back(heightTensor); +} + +void ResizeBilinearBuilderTest::SaveWidthTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + widthTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t *widthValue = new (std::nothrow) int64_t(1); + EXPECT_NE(nullptr, widthValue); + widthTensor->SetBuffer(widthValue, sizeof(int64_t)); + m_allTensors.emplace_back(widthTensor); +} + +void ResizeBilinearBuilderTest::SaveRatioTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + ratioTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool *ratioValue = new (std::nothrow) bool(true); + EXPECT_NE(nullptr, ratioValue); + ratioTensor->SetBuffer(ratioValue, sizeof(bool)); + m_allTensors.emplace_back(ratioTensor); +} + +void ResizeBilinearBuilderTest::SaveModeTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + modeTensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t *modeValue = new (std::nothrow) int8_t(1); + EXPECT_NE(nullptr, modeValue); + modeTensor->SetBuffer(modeValue, sizeof(int8_t)); + m_allTensors.emplace_back(modeTensor); +} + +void ResizeBilinearBuilderTest::SaveOutsideTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + outsideTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t *outsideValue = 
new (std::nothrow) int64_t(1); + EXPECT_NE(nullptr, outsideValue); + outsideTensor->SetBuffer(outsideValue, sizeof(int64_t)); + m_allTensors.emplace_back(outsideTensor); +} + +void ResizeBilinearBuilderTest::SetParameterTensor() +{ + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); +} + +/** + * @tc.name: resizebilinear_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SetParameterTensor(); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: resizebilinear_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SetParameterTensor(); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: resizebilinear_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SetParameterTensor(); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: resizebilinear_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_004, TestSize.Level0) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SetParameterTensor(); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: resizebilinear_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: resizebilinear_build_006 + * @tc.desc: Verify that the build function return a failed message without output 
tensor + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: resizebilinear_build_007 + * @tc.desc: Verify that the build function return a failed message with invalided height's dataType + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + heightTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + int32_t heightValues = 1; + heightTensor->SetBuffer(&heightValues, sizeof(heightValues)); + m_allTensors.emplace_back(heightTensor); + + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + heightTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_008 + * @tc.desc: Verify that the build function return a failed message with invalided width's dataType + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_008, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + + widthTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + int32_t widthValues = 1; + widthTensor->SetBuffer(&widthValues, sizeof(widthValues)); + m_allTensors.emplace_back(widthTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + widthTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_009 + * @tc.desc: Verify that the build function return a failed message with invalided ratio's dataType + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + ratioTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + int64_t ratioValues = 
1; + ratioTensor->SetBuffer(&ratioValues, sizeof(ratioValues)); + m_allTensors.emplace_back(ratioTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + ratioTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_010 + * @tc.desc: Verify that the build function return a failed message with invalided mode's dataType + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_010, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + modeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + int64_t modeValues = 1; + modeTensor->SetBuffer(&modeValues, sizeof(modeValues)); + m_allTensors.emplace_back(modeTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + modeTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_011 + * @tc.desc: Verify that the build function return a failed message with invalided outside's dataType + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_011, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + + outsideTensor = TransToNNTensor(OH_NN_INT32, + m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + int32_t outsideValues = 1; + outsideTensor->SetBuffer(&outsideValues, sizeof(outsideValues)); + m_allTensors.emplace_back(outsideTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + outsideTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_012 + * @tc.desc: Verify that the build function return a failed message with invalided height's dimension + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_012, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + std::vector heightDim = {2}; + heightTensor = TransToNNTensor(OH_NN_INT64, heightDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + int64_t 
heightValues[2] = {1, 1}; + heightTensor->SetBuffer(heightValues, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(heightTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + heightTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_013 + * @tc.desc: Verify that the build function return a failed message with invalided width's dimension + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_013, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + std::vector widthDim = {2}; + widthTensor = TransToNNTensor(OH_NN_INT64, widthDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + int64_t widthValues[2] = {1, 1}; + widthTensor->SetBuffer(widthValues, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(widthTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + widthTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_014 + * @tc.desc: Verify that the build function return a failed message with invalided ratio's dimension + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_014, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + std::vector ratioDim = {2}; + ratioTensor = TransToNNTensor(OH_NN_BOOL, + ratioDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + bool ratioValues[2] = {true, true}; + ratioTensor->SetBuffer(ratioValues, 2 * sizeof(bool)); + m_allTensors.emplace_back(ratioTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + ratioTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_015 + * @tc.desc: Verify that the build function return a failed message with invalided mode's dimension + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_015, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + + std::vector modeDim = {2}; + modeTensor = TransToNNTensor(OH_NN_INT8, + 
modeDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + int8_t modeValues[2] = {1, 1}; + modeTensor->SetBuffer(modeValues, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(modeTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + modeTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_016 + * @tc.desc: Verify that the build function return a failed message with invalided outside's dimension + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_016, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + + std::vector outsideDim = {2}; + outsideTensor = TransToNNTensor(OH_NN_INT64, + outsideDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + int64_t outsideValues[2] = {1, 1}; + outsideTensor->SetBuffer(outsideValues, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(outsideTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + outsideTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_017 + * @tc.desc: Verify that the build function return a failed message with invalided parameter + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_017, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: resizebilinear_build_018 + * @tc.desc: Verify that the build function return a failed message with empty height's buffer + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_018, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + heightTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + m_allTensors.emplace_back(heightTensor); + + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, 
m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + heightTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_019 + * @tc.desc: Verify that the build function return a failed message with empty width's buffer + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_019, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + widthTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + m_allTensors.emplace_back(widthTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + widthTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_020 + * @tc.desc: Verify that the build function return a failed message with empty ratio's buffer + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_020, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + ratioTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + m_allTensors.emplace_back(ratioTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + ratioTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_021 + * @tc.desc: Verify that the build function return a failed message with empty mode's buffer + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_021, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + modeTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + m_allTensors.emplace_back(modeTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + modeTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_022 + * @tc.desc: Verify that the build function return a failed message with empty outside's buffer + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, 
resizebilinear_build_022, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + + outsideTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + m_allTensors.emplace_back(outsideTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + outsideTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: resizebilinear_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SetParameterTensor(); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(primitive, expectPrimitive); + + int64_t heightValue = 1; + int64_t widthValue = 1; + bool ratioValue = true; + int8_t modeValue = 1; + int64_t outsideValue = 1; + + int64_t heightReturn = mindspore::lite::MindIR_Resize_GetNewHeight(primitive.get()); + EXPECT_EQ(heightReturn, heightValue); + int64_t widthReturn = mindspore::lite::MindIR_Resize_GetNewWidth(primitive.get()); + EXPECT_EQ(widthReturn, widthValue); + bool ratioReturn = mindspore::lite::MindIR_Resize_GetPreserveAspectRatio(primitive.get()); + EXPECT_EQ(ratioReturn, ratioValue); + int8_t modeReturn = mindspore::lite::MindIR_Resize_GetCoordinateTransformMode(primitive.get()); + EXPECT_EQ(modeReturn, modeValue); + int64_t outsideReturn = mindspore::lite::MindIR_Resize_GetExcludeOutside(primitive.get()); + EXPECT_EQ(outsideReturn, outsideValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/rsqrt_builder_test.cpp b/test/unittest/ops/rsqrt_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..62c98b3808d4e70dd4c71acc00dbb809bebacb92 --- /dev/null +++ b/test/unittest/ops/rsqrt_builder_test.cpp @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/rsqrt_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class RsqrtBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + RsqrtBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 8, 1, 1}; +}; + +void RsqrtBuilderTest::SetUp() {} + +void RsqrtBuilderTest::TearDown() {} + +/** + * @tc.name: rsqrt_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: rsqrt_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputs, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: rsqrt_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: rsqrt_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_build_004, TestSize.Level0) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: rsqrt_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: rsqrt_build_006 + * @tc.desc: 
Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: rsqrt_build_007 + * @tc.desc: Verify that the build function return a failed message with a virtual parameter + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_build_007, TestSize.Level0) +{ + std::vector paramsIndex = {2}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + std::shared_ptr paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: rsqrt_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: rsqrt_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputs, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr rsqrtPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(rsqrtPrimitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/scale_builder_test.cpp b/test/unittest/ops/scale_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5ab6946cf4630a2adf5248d31489e0041b466059 --- /dev/null +++ b/test/unittest/ops/scale_builder_test.cpp @@ -0,0 +1,377 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/scale_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ScaleBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveActivationTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + ScaleBuilder m_builder; + std::vector m_inputs {0, 1, 2}; + std::vector m_outputs {3}; + std::vector m_params {4, 5}; + std::vector m_dim {1, 4, 1, 1}; + std::vector m_paramDim {}; +}; + +void ScaleBuilderTest::SetUp() {} + +void ScaleBuilderTest::TearDown() {} + +void ScaleBuilderTest::SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr axisTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t *axisValue = new (std::nothrow) int64_t(1); + EXPECT_NE(nullptr, axisValue); + axisTensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); +} + +void ScaleBuilderTest::SaveActivationTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr activationTensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t *activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + activationTensor->SetBuffer(activationValue, sizeof(int64_t)); + m_allTensors.emplace_back(activationTensor); +} + +/** + * @tc.name: scale_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: scale_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: scale_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2, 3}; + m_outputs = {4}; + m_params = {5, 6}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, 
nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: scale_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_004, TestSize.Level0) +{ + m_outputs = {3, 4}; + m_params = {5, 6}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: scale_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: scale_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: scale_build_007 + * @tc.desc: Verify that the build function return a failed message with invalided axis's dataType + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + int32_t axisValue = 1; + axisTensor->SetBuffer(&axisValue, sizeof(axisValue)); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + axisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: scale_build_008 + * @tc.desc: Verify that the build function return a failed message with invalided activation's dataType + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_008, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + + std::shared_ptr activationTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + int64_t activationValue = 0; + activationTensor->SetBuffer(&activationValue, sizeof(activationValue)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 
0); +} + +/** + * @tc.name: scale_build_009 + * @tc.desc: Verify that the build function return a failed message with invalided axis's dimension + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + std::vector axistDim = {2}; + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, axistDim, nullptr, OH_NN_SCALE_AXIS); + int64_t axisValue[2] = {1, 1}; + axisTensor->SetBuffer(axisValue, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); + + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + axisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: scale_build_010 + * @tc.desc: Verify that the build function return a failed message with invalided activation's dimension + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_010, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + + std::vector activationDim = {2}; + std::shared_ptr activationTensor = TransToNNTensor(OH_NN_INT8, + activationDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + int64_t activationValue[2] = {1, 1}; + activationTensor->SetBuffer(activationValue, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: scale_build_011 + * @tc.desc: Verify that the build function return a failed message with invalided activation's buffer + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_011, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + + std::shared_ptr activationTensor = TransToNNTensor(OH_NN_INT8, + m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + int8_t activationValue = -1; + activationTensor->SetBuffer(&activationValue, sizeof(activationValue)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: scale_build_012 + * @tc.desc: Verify that the build function return a failed message with invalided parameter + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_012, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: scale_build_013 + * @tc.desc: Verify that the build function return a failed message with empty axis's buffer + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_013, 
TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + axisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: scale_build_014 + * @tc.desc: Verify that the build function return a failed message with empty activation's buffer + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_014, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + + std::shared_ptr activationTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, + nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: scale_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: scale_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(primitive, expectPrimitive); + + int64_t axisValue = 1; + int8_t activationValue = 0; + auto axisReturn = mindspore::lite::MindIR_ScaleFusion_GetAxis(primitive.get()); + EXPECT_EQ(axisReturn, axisValue); + auto activationReturn = mindspore::lite::MindIR_ScaleFusion_GetActivationType(primitive.get()); + EXPECT_EQ(activationReturn, activationValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/shape_builder_test.cpp b/test/unittest/ops/shape_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b52f0dbea84f66a84e409e78f8747399c39a08c2 --- /dev/null +++ b/test/unittest/ops/shape_builder_test.cpp @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/shape_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ShapeBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + ShapeBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_inputDim {1, 2, 3, 1}; + std::vector m_outputDim {4}; +}; + +void ShapeBuilderTest::SetUp() {} + +void ShapeBuilderTest::TearDown() {} + +/** + * @tc.name: shape_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: shape_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: shape_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: shape_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_build_004, TestSize.Level0) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: shape_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, 
m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: shape_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: shape_build_007 + * @tc.desc: Verify that the build function return a failed message with a virtual parameter + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_build_007, TestSize.Level0) +{ + std::vector paramsIndex = {2}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: shape_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: shape_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr shapePrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(shapePrimitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/sigmoid_builder_test.cpp b/test/unittest/ops/sigmoid_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0eb4dcc1c2071ff07f7f08ff1ba7b21d0168ec16 --- /dev/null +++ b/test/unittest/ops/sigmoid_builder_test.cpp @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/sigmoid_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SigmoidBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + SigmoidBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 5, 1, 1}; +}; + +void SigmoidBuilderTest::SetUp() {} + +void SigmoidBuilderTest::TearDown() {} + +/** + * @tc.name: sigmoid_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: sigmoid_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: sigmoid_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sigmoid_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_build_004, TestSize.Level0) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sigmoid_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sigmoid_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * 
@tc.name: sigmoid_build_007 + * @tc.desc: Verify that the build function return a failed message with a virtual parameter + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_build_007, TestSize.Level0) +{ + std::vector paramsIndex = {2}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + std::shared_ptr sigmoidTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(sigmoidTensor); + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sigmoid_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: sigmoid_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr sigmoidPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(sigmoidPrimitive, expectPrimitive); + + mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_SIGMOID; + auto returnValue = mindspore::lite::MindIR_Activation_GetActivationType(sigmoidPrimitive.get()); + EXPECT_EQ(returnValue, activationType); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/slice_builder_test.cpp b/test/unittest/ops/slice_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b1a2fcf06a7ed912359ca9cb8da68c5fd70317a9 --- /dev/null +++ b/test/unittest/ops/slice_builder_test.cpp @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/slice_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SliceBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void SaveAxesTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + SliceBuilder m_builder; +}; + +void SliceBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector paramsIndex = {}; + std::vector inputDim = {3, 2, 3}; + std::vector OutputDim = {1, 1, 3}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +void SliceBuilderTest::SaveAxesTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr axesTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axesValue = new (std::nothrow) int64_t[1]{0}; + EXPECT_NE(nullptr, axesValue); + axesTensor->SetBuffer(axesValue, sizeof(int64_t)); + m_allTensors.emplace_back(axesTensor); +} + +/** + * @tc.name: slice_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: slice_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3 }; + + InitTensor(inputsIndex, outputsIndex); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: slice_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: slice_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3, 4 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + 
* @tc.name: slice_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_build_005, TestSize.Level0) +{ + std::vector inputsIndex = {}; + std::vector outputsIndex = {}; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: slice_build_006 + * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = {}; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: slice_build_007 + * @tc.desc: Provide a param to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = {3}; + std::vector paramsIndex = { 4 }; + std::vector inputDim = {3, 2, 3}; + std::vector OutputDim = {1, 1, 3}; + std::vector paramDim = {}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + SaveAxesTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: slice_getprimitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_getprimitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: slice_getprimitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_getprimitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3 }; + + InitTensor(inputsIndex, outputsIndex); + + std::vector expectAxesValue = {0}; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(primitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/softmax_builder_test.cpp b/test/unittest/ops/softmax_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..58ce8337a1ce0dc1ca8961070a4c247793ec629a --- /dev/null +++ b/test/unittest/ops/softmax_builder_test.cpp @@ -0,0 +1,303 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/softmax_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SoftmaxBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + SoftmaxBuilder m_builder; + std::vector m_expectAxisValue; +}; + +void SoftmaxBuilderTest::SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr axisTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t[1]{1}; + EXPECT_NE(nullptr, axisValue); + axisTensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); + m_expectAxisValue.emplace_back(*axisValue); +} + +void SoftmaxBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector paramsIndex = { 2 }; + std::vector inputDim = {1, 5, 1, 1}; + std::vector OutputDim = {1, 5, 1, 1}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +/** + * @tc.name: softmax_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SOFTMAX_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: softmax_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SOFTMAX_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: softmax_build_003 + * @tc.desc: Provide two more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector 
outputsIndex = { 3 };
+ std::vector<int32_t> paramDim = {};
+
+ InitTensor(inputsIndex, outputsIndex);
+ SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SOFTMAX_AXIS);
+
+ OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+ EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: softmax_build_004
+ * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SoftmaxBuilderTest, softmax_build_004, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0 };
+ std::vector<uint32_t> outputsIndex = { 1, 2 };
+ std::vector<int32_t> paramDim = {};
+
+ InitTensor(inputsIndex, outputsIndex);
+ SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SOFTMAX_AXIS);
+
+ OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+ EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: softmax_build_005
+ * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SoftmaxBuilderTest, softmax_build_005, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0, 1 };
+ std::vector<uint32_t> outputsIndex = { 2 };
+ std::vector<uint32_t> paramsIndex = { 3 };
+ OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors);
+ EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: softmax_build_006
+ * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SoftmaxBuilderTest, softmax_build_006, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0 };
+ std::vector<uint32_t> outputsIndex = {};
+ std::vector<int32_t> paramDim = {};
+
+ InitTensor(inputsIndex, outputsIndex);
+ SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SOFTMAX_AXIS);
+
+ OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+ EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: softmax_build_007
+ * @tc.desc: Provide no param error to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SoftmaxBuilderTest, softmax_build_007, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0, 1 };
+ std::vector<uint32_t> outputsIndex = { 2 };
+ std::vector<uint32_t> paramsIndex = {};
+ std::vector<int32_t> inputDim = {1, 5, 1, 1};
+ std::vector<int32_t> OutputDim = {1, 5, 1, 1};
+
+ m_paramsIndex = paramsIndex;
+ SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr);
+ SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr);
+
+ OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+ EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: softmax_build_008
+ * @tc.desc: Provide param type error to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SoftmaxBuilderTest, softmax_build_008, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0 };
+ std::vector<uint32_t> outputsIndex = { 1 };
+ std::vector<int32_t> paramDim = {};
+
+ InitTensor(inputsIndex, outputsIndex);
+ SaveAxisTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_SOFTMAX_AXIS);
+
+ OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+ EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: softmax_build_009
+ * @tc.desc: Provide param dimension error to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SoftmaxBuilderTest, softmax_build_009, TestSize.Level0)
+{
+ std::vector<uint32_t> 
inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {1, 5, 1, 1}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SOFTMAX_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: softmax_build_010 + * @tc.desc: Provide parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_build_010, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SOFTMAX_AXIS); + axisTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: softmax_build_011 + * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_build_011, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SCALE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: softmax_getprimitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_getprimitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: softmax_getprimitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_getprimitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SOFTMAX_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr nullPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(nullPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_Softmax_GetAxis(primitive.get()); + auto returnValueSize = returnValue.size(); + for (size_t i = 0; i < returnValueSize; ++i) { + EXPECT_EQ(returnValue[i], m_expectAxisValue[i]); + } +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/spacetobatchnd_builder_test.cpp b/test/unittest/ops/spacetobatchnd_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..05c63171d7a3348d63131b89d2ddfd27c2393d17 --- /dev/null +++ b/test/unittest/ops/spacetobatchnd_builder_test.cpp @@ -0,0 +1,487 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/space_to_batch_nd_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SpaceToBatchNDBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void SaveBlockShapeTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SavePaddingsTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void InitTensor(const std::vector& inputsIndex, const std::vector& outputsIndex, + const std::vector& paramsIndex); + +protected: + SpaceToBatchNDBuilder m_builder; + std::vector m_expectBlockShapeValue; + std::vector> m_expectPaddingsValue; +}; + +void SpaceToBatchNDBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector paramsIndex = { 2, 3 }; + std::vector inputDim = {3, 2, 3}; + std::vector OutputDim = {1, 1, 3}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +void SpaceToBatchNDBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex, const std::vector& paramsIndex) +{ + std::vector inputDim = {1, 2, 2, 1}; + std::vector OutputDim = {4, 1, 1, 1}; + std::vector shapeDim = {3}; + std::vector paddingsDim = {2, 2}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); +} + +void SpaceToBatchNDBuilderTest::SaveBlockShapeTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + const int blockShapeLen = 2; + std::shared_ptr blockShapeTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* blockShapeValue = new (std::nothrow) int64_t[blockShapeLen] {2, 2}; + EXPECT_NE(nullptr, blockShapeValue); + blockShapeTensor->SetBuffer(blockShapeValue, sizeof(int64_t) * blockShapeLen); + m_allTensors.emplace_back(blockShapeTensor); + m_expectBlockShapeValue.assign(blockShapeValue, blockShapeValue + blockShapeLen); +} + +void SpaceToBatchNDBuilderTest::SavePaddingsTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + const int paddingsLen = 4; + const int row = 2; + const int col = 2; + std::shared_ptr paddingsTensor = TransToNNTensor(dataType, dim, 
quantParam, type); + int64_t* paddingsValue = new (std::nothrow) int64_t[paddingsLen] {0, 0, 0, 0}; + EXPECT_NE(nullptr, paddingsValue); + paddingsTensor->SetBuffer(paddingsValue, sizeof(int64_t) * paddingsLen); + m_allTensors.emplace_back(paddingsTensor); + + m_expectPaddingsValue.resize(row); + for (int i = 0; i < row; ++i) { + m_expectPaddingsValue[i].resize(col); + } + + int i = 0; + int j = 0; + for (int k = 0; k < paddingsLen; ++k) { + i = k / col; + j = k % col; + m_expectPaddingsValue[i][j] = paddingsValue[k]; + } +} + +/** + * @tc.name: spacetobatchnd_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: spacetobatchnd_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2}; + + InitTensor(inputsIndex, outputsIndex); + + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: spacetobatchnd_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, 
OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_build_005 + * @tc.desc: Provide empty input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_005, TestSize.Level0) +{ + std::vector inputsIndex = {}; + std::vector outputsIndex = {}; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_build_006 + * @tc.desc: Provide param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2}; + + InitTensor(inputsIndex, outputsIndex); + + SaveBlockShapeTensor(OH_NN_INT32, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_build_007 + * @tc.desc: Provide param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2}; + + InitTensor(inputsIndex, outputsIndex); + + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT32, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_build_008 + * @tc.desc: Provide input dimensions error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_008, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramsIndex = { 2, 3 }; + InitTensor(inputsIndex, outputsIndex, paramsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_build_009 + * @tc.desc: Provide output dimensions error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_009, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramsIndex = { 2, 3 }; + InitTensor(inputsIndex, outputsIndex, paramsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + 
EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: spacetobatchnd_build_010
+ * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_010, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0 };
+ std::vector<uint32_t> outputsIndex = {};
+ std::vector<uint32_t> paramsIndex = { 1, 2 };
+
+ InitTensor(inputsIndex, outputsIndex, paramsIndex);
+ OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+ EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: spacetobatchnd_build_011
+ * @tc.desc: Provide block shape parameter buffer is nullptr to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_011, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0 };
+ std::vector<uint32_t> outputsIndex = { 1 };
+ std::vector<int32_t> shapeDim = {2};
+ std::vector<int32_t> paddingsDim = {2, 2};
+
+ InitTensor(inputsIndex, outputsIndex);
+ std::shared_ptr<NNTensor> blockShapeTensor = TransToNNTensor(OH_NN_INT64, shapeDim, nullptr,
+ OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE);
+ blockShapeTensor->SetBuffer(nullptr, 0);
+ m_allTensors.emplace_back(blockShapeTensor);
+
+ SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS);
+
+ OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+ EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: spacetobatchnd_build_012
+ * @tc.desc: Provide paddings parameter buffer is nullptr to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_012, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0 };
+ std::vector<uint32_t> outputsIndex = { 1 };
+ std::vector<int32_t> shapeDim = {2};
+ std::vector<int32_t> paddingsDim = {2, 2};
+
+ InitTensor(inputsIndex, outputsIndex);
+ SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE);
+
+ std::shared_ptr<NNTensor> blockShapeTensor = TransToNNTensor(OH_NN_INT64, paddingsDim, nullptr,
+ OH_NN_SPACE_TO_BATCH_ND_PADDINGS);
+ blockShapeTensor->SetBuffer(nullptr, 0);
+ m_allTensors.emplace_back(blockShapeTensor);
+
+ OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+ EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: spacetobatchnd_build_013
+ * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_013, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0 };
+ std::vector<uint32_t> outputsIndex = { 1 };
+ std::vector<int32_t> shapeDim = {2};
+ std::vector<int32_t> paddingsDim = {2, 2};
+
+ InitTensor(inputsIndex, outputsIndex);
+ SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE);
+ SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SCALE_AXIS);
+
+ OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+ EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: spacetobatchnd_build_014
+ * @tc.desc: Provide block shape parameter dimension error to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_014, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0 };
+ std::vector<uint32_t> outputsIndex = { 1 };
+ std::vector<int32_t> shapeDim = {2, 3};
+ std::vector<int32_t> paddingsDim 
= {2, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_build_015 + * @tc.desc: Provide paddings parameter dimension error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_015, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_build_016 + * @tc.desc: Provide paddings parameter dimension error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_016, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 3}; + + InitTensor(inputsIndex, outputsIndex); + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + + const int paddingsLen = 6; + std::shared_ptr paddingsTensor = TransToNNTensor(OH_NN_INT64, paddingsDim, nullptr, + OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + int64_t* paddingsValue = new (std::nothrow) int64_t[paddingsLen] {0, 0, 0, 0, 0, 0}; + EXPECT_NE(nullptr, paddingsValue); + paddingsTensor->SetBuffer(paddingsValue, sizeof(int64_t) * paddingsLen); + m_allTensors.emplace_back(paddingsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_getprimitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_getprimitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: spacetobatchnd_getprimitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_getprimitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(primitive, expectPrimitive); 
+ + auto returnValue = mindspore::lite::MindIR_SpaceToBatchND_GetPaddings(primitive.get()); + auto returnValueSize = returnValue.size(); + for (size_t i = 0; i < returnValueSize; ++i) { + auto k = returnValue[i].size(); + for (size_t j = 0; j < k; ++j) { + EXPECT_EQ(returnValue[i][j], m_expectPaddingsValue[i][j]); + } + } + + auto returnBlockShape = mindspore::lite::MindIR_SpaceToBatchND_GetBlockShape(primitive.get()); + auto returnBlockShapeSize = returnBlockShape.size(); + for (size_t i = 0; i < returnBlockShapeSize; ++i) { + EXPECT_EQ(returnBlockShape[i], m_expectBlockShapeValue[i]); + } +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/split_builder_test.cpp b/test/unittest/ops/split_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d657265b204fa4373f89bfbfde4747b88d4f853f --- /dev/null +++ b/test/unittest/ops/split_builder_test.cpp @@ -0,0 +1,435 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/split_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SplitBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveOutputNumTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveSizeSplitsTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + SplitBuilder m_builder; + int64_t m_expectOutputNum {0}; + int64_t m_expectAxis {0}; + std::vector m_expectSizeSplitsValue; +}; + +void SplitBuilderTest::InitTensor(const std::vector& inputsIndex, const std::vector& outputsIndex) +{ + std::vector paramsIndex = { 3, 4, 5 }; + std::vector inputDim = {2, 4}; + std::vector OutputDim = {1, 4, 0, 0}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +void SplitBuilderTest::SaveOutputNumTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr outputNumTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* outputNumValue = new (std::nothrow) int64_t[1]{2}; + EXPECT_NE(nullptr, outputNumValue); + outputNumTensor->SetBuffer(outputNumValue, sizeof(int64_t)); + m_allTensors.emplace_back(outputNumTensor); + m_expectOutputNum = *outputNumValue; +} + +void SplitBuilderTest::SaveSizeSplitsTensor(OH_NN_DataType 
dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + const int sizeSplitsLen = 2; + std::shared_ptr sizeSplitsTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* sizeSplitsValue = new (std::nothrow) int64_t[sizeSplitsLen] {0, 0}; + EXPECT_NE(nullptr, sizeSplitsValue); + sizeSplitsTensor->SetBuffer(sizeSplitsValue, sizeof(int64_t) * sizeSplitsLen); + m_allTensors.emplace_back(sizeSplitsTensor); + m_expectSizeSplitsValue.assign(sizeSplitsValue, sizeSplitsValue + sizeSplitsLen); +} + +void SplitBuilderTest::SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr axisTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t[1]{0}; + EXPECT_NE(nullptr, axisValue); + axisTensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); + m_expectAxis = *axisValue; +} + +/** + * @tc.name: split_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: split_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: split_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, 
split_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2, 3 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_005, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4, 5 }; + std::vector paramsIndex = { 6, 7, 8 }; + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_006 + * @tc.desc: Provide axis param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_007 + * @tc.desc: Provide axis param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_008 + * @tc.desc: Provide size splits param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_008, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_009 + * @tc.desc: Provide output num param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_009, TestSize.Level0) +{ + 
std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_010 + * @tc.desc: Provide axis param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_010, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_011 + * @tc.desc: Provide axis parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_011, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + axisTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(axisTensor); + + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_012 + * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_012, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SCALE_AXIS); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_013 + * @tc.desc: Provide axis parameter not scalar to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_013, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + std::vector axisDim = {1, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, axisDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + OH_NN_ReturnCode ret = 
m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_014 + * @tc.desc: Provide output parameter not scalar to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_014, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + std::vector outputNumDim = {1, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, outputNumDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_015 + * @tc.desc: Provide empty output and param to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_015, TestSize.Level0) +{ + std::vector inputsIndex = { 1 }; + std::vector outputsIndex = {}; + std::vector paramsIndex = {}; + std::vector inputDim = {2, 4}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_getprimitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_getprimitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: split_getprimitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_getprimitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_Split_GetSizeSplits(primitive.get()); + auto returnValueSize = returnValue.size(); + for (size_t i = 0; i < returnValueSize; ++i) { + EXPECT_EQ(returnValue[i], m_expectSizeSplitsValue[i]); + } + + auto returnOutputNum = mindspore::lite::MindIR_Split_GetOutputNum(primitive.get()); + EXPECT_EQ(returnOutputNum, m_expectOutputNum); + + auto returnAxis = mindspore::lite::MindIR_Split_GetAxis(primitive.get()); + EXPECT_EQ(returnAxis, m_expectAxis); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/sqrt_builder_test.cpp b/test/unittest/ops/sqrt_builder_test.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..93a136774eaa993d3fa9ad11c92affa29722281f
--- /dev/null
+++ b/test/unittest/ops/sqrt_builder_test.cpp
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "frameworks/native/ops/sqrt_builder.h"
+
+#include <gtest/gtest.h>
+#include "frameworks/native/nn_tensor.h"
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class SqrtBuilderTest : public OpsTest {
+protected:
+ void InitTensor(const std::vector<uint32_t>& inputsIndex,
+ const std::vector<uint32_t>& outputsIndex) override;
+ void CheckResult();
+
+protected:
+ SqrtBuilder m_builder;
+};
+
+void SqrtBuilderTest::InitTensor(const std::vector<uint32_t>& inputsIndex,
+ const std::vector<uint32_t>& outputsIndex)
+{
+ std::vector<int32_t> inputDim = {3};
+ std::vector<int32_t> OutputDim = {3};
+
+ SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr);
+ SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr);
+}
+
+void SqrtBuilderTest::CheckResult()
+{
+ LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+ LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive);
+ EXPECT_NE(primitive, expectPrimitive);
+}
+
+/**
+ * @tc.name: sqrt_build_001
+ * @tc.desc: Provide normal input, output to verify the normal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SqrtBuilderTest, sqrt_build_001, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0 };
+ std::vector<uint32_t> outputsIndex = { 1 };
+ std::vector<int32_t> inputDim = {3};
+ std::vector<int32_t> OutputDim = {3};
+
+ SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr);
+ SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr);
+ OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+ EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/**
+ * @tc.name: sqrt_build_002
+ * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SqrtBuilderTest, sqrt_build_002, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0 };
+ std::vector<uint32_t> outputsIndex = { 1 };
+ std::vector<int32_t> inputDim = {3};
+ std::vector<int32_t> OutputDim = {3};
+
+ SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr);
+ SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr);
+ EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+ OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+ EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: sqrt_build_003
+ * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SqrtBuilderTest, sqrt_build_003, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0, 1 };
+ std::vector<uint32_t> outputsIndex = { 2 };
+ std::vector<int32_t> inputDim = {3};
+ std::vector<int32_t> 
OutputDim = {3}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sqrt_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqrtBuilderTest, sqrt_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector inputDim = {3}; + std::vector OutputDim = {3}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sqrt_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqrtBuilderTest, sqrt_build_005, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramsIndex = {}; + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sqrt_build_006 + * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqrtBuilderTest, sqrt_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = {}; + std::vector inputDim = {3}; + std::vector OutputDim = {3}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sqrt_build_007 + * @tc.desc: Provide a param to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqrtBuilderTest, sqrt_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramsIndex = { 4 }; + + m_paramsIndex = paramsIndex; + InitTensor(inputsIndex, outputsIndex); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sqrt_getprimitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(SqrtBuilderTest, sqrt_getprimitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: sqrt_getprimitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(SqrtBuilderTest, sqrt_getprimitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + InitTensor(inputsIndex, outputsIndex); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + CheckResult(); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git 
a/test/unittest/ops/squared_difference_builder_test.cpp b/test/unittest/ops/squared_difference_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c203592ce91c427234990211b7824cdeaf7c6bc4 --- /dev/null +++ b/test/unittest/ops/squared_difference_builder_test.cpp @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/squared_difference_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SquaredDifferenceBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void CheckResult(); + +protected: + SquaredDifferenceBuilder m_builder; +}; + +void SquaredDifferenceBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector inputDim = {3}; + std::vector OutputDim = {3}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +void SquaredDifferenceBuilderTest::CheckResult() +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(primitive, expectPrimitive); +} + +/** + * @tc.name: squareddifference_build_001 + * @tc.desc: Provide normal input, output to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector inputDim = {3}; + std::vector OutputDim = {3}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: squareddifference_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector inputDim = {3}; + std::vector OutputDim = {3}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: squareddifference_build_003 + * 
@tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_build_003, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0, 1, 2 };
+ std::vector<uint32_t> outputsIndex = { 3 };
+ std::vector<int32_t> inputDim = {3};
+ std::vector<int32_t> OutputDim = {3};
+
+ SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr);
+ SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr);
+ OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+ EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: squareddifference_build_004
+ * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_build_004, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0, 1 };
+ std::vector<uint32_t> outputsIndex = { 2, 3 };
+ std::vector<int32_t> inputDim = {3};
+ std::vector<int32_t> OutputDim = {3};
+
+ SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr);
+ SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr);
+ OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+ EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: squareddifference_build_005
+ * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_build_005, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0, 1 };
+ std::vector<uint32_t> outputsIndex = { 2 };
+ std::vector<uint32_t> paramsIndex = {};
+ OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors);
+ EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: squareddifference_build_006
+ * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_build_006, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0, 1 };
+ std::vector<uint32_t> outputsIndex = { 2 };
+ std::vector<int32_t> inputDim = {3};
+
+ SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr);
+
+ OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, outputsIndex, m_allTensors);
+ EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: squareddifference_build_007
+ * @tc.desc: Provide a param to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_build_007, TestSize.Level0)
+{
+ std::vector<uint32_t> inputsIndex = { 0, 1 };
+ std::vector<uint32_t> outputsIndex = { 2 };
+ std::vector<uint32_t> paramsIndex = { 4 };
+
+ m_paramsIndex = paramsIndex;
+ InitTensor(inputsIndex, outputsIndex);
+ OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+ EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: squareddifference_getprimitive_001
+ * @tc.desc: Verify the GetPrimitive function return nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_getprimitive_001, TestSize.Level0)
+{
+ auto primitive = m_builder.GetPrimitive();
+ LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive);
+ EXPECT_EQ(primitive, expectPrimitive);
+}
+
+/**
+ * @tc.name: squareddifference_getprimitive_002
+ * @tc.desc: Verify the normal return behavior of the getprimitive function
+ * @tc.type: FUNC
+ */ 
+HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_getprimitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + InitTensor(inputsIndex, outputsIndex); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + CheckResult(); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/squeeze_builder_test.cpp b/test/unittest/ops/squeeze_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2e39915ad9fe41c001fcdd54aee8d82945c35867 --- /dev/null +++ b/test/unittest/ops/squeeze_builder_test.cpp @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/squeeze_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SqueezeBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + SqueezeBuilder m_builder; + std::vector m_expectAxisValue; +}; + +void SqueezeBuilderTest::SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr axisTensor =TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t[1]{2}; + EXPECT_NE(nullptr, axisValue); + axisTensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); + m_expectAxisValue.emplace_back(*axisValue); +} + +void SqueezeBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector paramsIndex = { 2 }; + std::vector inputDim = {3, 2, 1}; + std::vector OutputDim = {3, 2}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +/** + * @tc.name: squeeze_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SQUEEZE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: squeeze_build_002 + * @tc.desc: Call Build 
func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SQUEEZE_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: squeeze_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SQUEEZE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: squeeze_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SQUEEZE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: squeeze_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_005, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramsIndex = { 3 }; + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: squeeze_build_006 + * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = {}; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SQUEEZE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: squeeze_build_007 + * @tc.desc: Provide param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_SQUEEZE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: squeeze_build_008 + * @tc.desc: 
Provide axis parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_008, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + + std::shared_ptr axisTensor =TransToNNTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SQUEEZE_AXIS); + axisTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: squeeze_build_009 + * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_009, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SCALE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: squeeze_getprimitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_getprimitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: squeeze_getprimitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_getprimitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SQUEEZE_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(primitive, expectPrimitive); + + auto returnValue = mindspore::lite::MindIR_Squeeze_GetAxis(primitive.get()); + auto returnValueSize = returnValue.size(); + for (size_t i = 0; i < returnValueSize; ++i) { + EXPECT_EQ(returnValue[i], m_expectAxisValue[i]); + } +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/stack_builder_test.cpp b/test/unittest/ops/stack_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7dfc67e310403c467058ce129201885974407d63 --- /dev/null +++ b/test/unittest/ops/stack_builder_test.cpp @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/stack_builder.h" + +#include <gtest/gtest.h> +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class StackBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector<uint32_t>& inputsIndex, + const std::vector<uint32_t>& outputsIndex) override; + void SaveAxisTensor(OH_NN_DataType dataType, const std::vector<int32_t> &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + StackBuilder m_builder; + int64_t m_expectAxisValue {0}; +}; + +void StackBuilderTest::InitTensor(const std::vector<uint32_t>& inputsIndex, + const std::vector<uint32_t>& outputsIndex) +{ + std::vector<uint32_t> paramsIndex = { 3 }; + std::vector<int32_t> inputDim = {2}; + std::vector<int32_t> OutputDim = {2, 2}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +void StackBuilderTest::SaveAxisTensor(OH_NN_DataType dataType, const std::vector<int32_t> &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr<NNTensor> axisTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t[1]{1}; + EXPECT_NE(nullptr, axisValue); + axisTensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); + m_expectAxisValue = *axisValue; +} + +/** + * @tc.name: stack_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_001, TestSize.Level0) +{ + std::vector<uint32_t> inputsIndex = { 0, 1 }; + std::vector<uint32_t> outputsIndex = { 2 }; + std::vector<int32_t> paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: stack_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_002, TestSize.Level0) +{ + std::vector<uint32_t> inputsIndex = { 0, 1 }; + std::vector<uint32_t> outputsIndex = { 2 }; + std::vector<int32_t> paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: stack_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_003, TestSize.Level0) +{ + std::vector<uint32_t> inputsIndex = { 0, 1, 2 }; + std::vector<uint32_t> outputsIndex = { 3 }; + std::vector<int32_t> paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_build_004 + * @tc.desc: Provide one 
more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_005, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = {}; + std::vector paramsIndex = {}; + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_build_006 + * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = {}; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_build_007 + * @tc.desc: Provide param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_STACK_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_build_008 + * @tc.desc: Provide one less than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_008, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_build_009 + * @tc.desc: Provide axis parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_009, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + + std::shared_ptr axisTensor =TransToNNTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + axisTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_build_010 + * @tc.desc: Provide axis not scaler to 
verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_010, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {1, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_build_011 + * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_011, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SCALE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: stack_get_primitive_002 + * @tc.desc: Verify the normal return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_get_primitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(primitive, expectPrimitive); + + auto returnValue = mindspore::lite::MindIR_Stack_GetAxis(primitive.get()); + EXPECT_EQ(returnValue, m_expectAxisValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/strided_slice_builder_test.cpp b/test/unittest/ops/strided_slice_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3014b75c92e0619f96a03c7e43d71becb88e7eef --- /dev/null +++ b/test/unittest/ops/strided_slice_builder_test.cpp @@ -0,0 +1,557 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/strided_slice_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class StridedSliceBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void SaveBeginMaskTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveEndMaskTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveEllipsisMaskTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveNewAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveShrinkAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void InitParams(); + +protected: + StridedSliceBuilder m_builder; + int64_t m_expectBeginMaskValue {0}; + int64_t m_expectEndMaskValue {0}; + int64_t m_expectEllipsisMaskValue {0}; + int64_t m_expectNewAxisMaskValue {0}; + int64_t m_expectShrinkAxisMaskValue {0}; +}; + +void StridedSliceBuilderTest::SaveBeginMaskTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr beginMaskTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* beginMaskValue = new (std::nothrow) int64_t[1]{0}; + EXPECT_NE(nullptr, beginMaskValue); + beginMaskTensor->SetBuffer(beginMaskValue, sizeof(int64_t)); + m_allTensors.emplace_back(beginMaskTensor); + m_expectBeginMaskValue = *beginMaskValue; +} + +void StridedSliceBuilderTest::SaveEndMaskTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr endMaskTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* endMaskValue = new (std::nothrow) int64_t[1]{0}; + EXPECT_NE(nullptr, endMaskValue); + endMaskTensor->SetBuffer(endMaskValue, sizeof(int64_t)); + m_allTensors.emplace_back(endMaskTensor); + m_expectEndMaskValue = *endMaskValue; +} + +void StridedSliceBuilderTest::SaveEllipsisMaskTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr ellipsisMaskTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* ellipsisMaskValue = new (std::nothrow) int64_t[1]{0}; + EXPECT_NE(nullptr, ellipsisMaskValue); + ellipsisMaskTensor->SetBuffer(ellipsisMaskValue, sizeof(int64_t)); + m_allTensors.emplace_back(ellipsisMaskTensor); + m_expectEllipsisMaskValue = *ellipsisMaskValue; +} + +void StridedSliceBuilderTest::SaveNewAxisTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr axisTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* newAxisMaskValue = new (std::nothrow) int64_t[1]{0}; + EXPECT_NE(nullptr, newAxisMaskValue); + axisTensor->SetBuffer(newAxisMaskValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); + m_expectNewAxisMaskValue = *newAxisMaskValue; +} + +void StridedSliceBuilderTest::SaveShrinkAxisTensor(OH_NN_DataType 
dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr shrinkAxisMaskTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* shrinkAxisMaskValue = new (std::nothrow) int64_t[1]{0}; + EXPECT_NE(nullptr, shrinkAxisMaskValue); + shrinkAxisMaskTensor->SetBuffer(shrinkAxisMaskValue, sizeof(int64_t)); + m_allTensors.emplace_back(shrinkAxisMaskTensor); + m_expectShrinkAxisMaskValue = *shrinkAxisMaskValue; +} + +void StridedSliceBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector paramsIndex = { 5, 6, 7, 8, 9 }; + std::vector inputDim = {3, 2, 3}; + std::vector OutputDim = {1, 2, 2}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +void StridedSliceBuilderTest::InitParams() +{ + std::vector paramDim = {}; + SaveBeginMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_BEGIN_MASK); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); +} + +/** + * @tc.name: stridedslice_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + + InitTensor(inputsIndex, outputsIndex); + InitParams(); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: stridedslice_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + + InitTensor(inputsIndex, outputsIndex); + InitParams(); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: stridedslice_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + std::vector outputsIndex = { 10 }; + std::vector paramsIndex = { 11, 12, 13, 14, 15 }; + std::vector inputDim = {3, 2, 3}; + std::vector OutputDim = {1, 2, 2}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + InitParams(); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: 
FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4, 5 }; + std::vector paramsIndex = { 6, 7, 8, 9, 10 }; + std::vector inputDim = {3, 2, 3}; + std::vector OutputDim = {1, 2, 2}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + InitParams(); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_005, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3, 4, 5, 6, 7, 8 }; + std::vector outputsIndex = { 9 }; + std::vector paramsIndex = { 10, 11, 12, 13, 14 }; + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_006 + * @tc.desc:Provide empty output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = {}; + std::vector paramsIndex = { 4, 5, 6, 7, 8 }; + std::vector inputDim = {3, 2, 3}; + std::vector OutputDim = {1, 2, 2}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + InitParams(); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_007 + * @tc.desc: Provide beginmask param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramsIndex = { 5, 6, 7, 8, 9 }; + std::vector inputDim = {3, 2, 3}; + std::vector OutputDim = {1, 2, 2}; + std::vector paramDim = {}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + SaveBeginMaskTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_STRIDED_SLICE_BEGIN_MASK); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_008 + * @tc.desc: Provide endmask param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_008, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + 
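// Only the end mask is stored with the wrong data type (OH_NN_INT32 instead of int64) here; note that no begin-mask tensor is registered in this case, so Build should reject the graph either way. +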
InitTensor(inputsIndex, outputsIndex); + SaveEndMaskTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_009 + * @tc.desc: Provide ellipsismask param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_009, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_010 + * @tc.desc: Provide axis param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_010, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_011 + * @tc.desc: Provide shrinkaxis param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_011, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + + +/** + * @tc.name: stridedslice_build_012 + * @tc.desc: Provide begin mask parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_012, TestSize.Level0) +{ + std::vector 
inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + std::shared_ptr beginMaskTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, + OH_NN_STRIDED_SLICE_BEGIN_MASK); + beginMaskTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(beginMaskTensor); + + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_013 + * @tc.desc: Provide end mask parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_013, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveBeginMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_BEGIN_MASK); + + std::shared_ptr endMaskTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, + OH_NN_STRIDED_SLICE_END_MASK); + endMaskTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(endMaskTensor); + + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_014 + * @tc.desc: Provide ellipsis mask parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_014, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveBeginMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_BEGIN_MASK); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + + std::shared_ptr ellipsisMaskTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, + OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + ellipsisMaskTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(ellipsisMaskTensor); + + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_015 + * @tc.desc: Provide new axis parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_015, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + 
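// The begin, end, and ellipsis masks below get valid int64 buffers; the new-axis mask tensor is then registered with a null buffer (SetBuffer(nullptr, 0)) to exercise the missing-buffer branch of Build. +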
SaveBeginMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_BEGIN_MASK); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, + OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + axisTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(axisTensor); + + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_016 + * @tc.desc: Provide shrink axis parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_016, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveBeginMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_BEGIN_MASK); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + + std::shared_ptr shrinkAxisTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, + OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + shrinkAxisTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(shrinkAxisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_017 + * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_017, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveBeginMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_BEGIN_MASK); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SCALE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: stridedslice_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_get_primitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + + 
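// After a successful Build, each MindIR_StridedSlice_Get*Mask accessor checked below should echo the zero defaults written by the Save*MaskTensor helpers. +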
InitTensor(inputsIndex, outputsIndex); + InitParams(); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_NE(primitive, expectPrimitive); + + auto beginMaskReturn = mindspore::lite::MindIR_StridedSlice_GetBeginMask(primitive.get()); + EXPECT_EQ(beginMaskReturn, m_expectBeginMaskValue); + auto endMaskReturn = mindspore::lite::MindIR_StridedSlice_GetEndMask(primitive.get()); + EXPECT_EQ(endMaskReturn, m_expectEndMaskValue); + auto ellipsisMaskReturn = mindspore::lite::MindIR_StridedSlice_GetEllipsisMask(primitive.get()); + EXPECT_EQ(ellipsisMaskReturn, m_expectEllipsisMaskValue); + auto newAxisMaskReturn = mindspore::lite::MindIR_StridedSlice_GetNewAxisMask(primitive.get()); + EXPECT_EQ(newAxisMaskReturn, m_expectNewAxisMaskValue); + auto shrinkAxisMaskReturn = mindspore::lite::MindIR_StridedSlice_GetShrinkAxisMask(primitive.get()); + EXPECT_EQ(shrinkAxisMaskReturn, m_expectShrinkAxisMaskValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/sub_builder_test.cpp b/test/unittest/ops/sub_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ee7f7a882b72ea7c0e8e00553073dbeb29dcddb8 --- /dev/null +++ b/test/unittest/ops/sub_builder_test.cpp @@ -0,0 +1,324 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/sub_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SubBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void SaveActivateTypeTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + SubBuilder m_builder; + int8_t m_expectActivationTypeValue {0}; +}; + +void SubBuilderTest::SaveActivateTypeTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr activationTypeTensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* activationTypeValue = new (std::nothrow) int8_t[1]{OH_NN_FUSED_RELU6}; + EXPECT_NE(nullptr, activationTypeValue); + activationTypeTensor->SetBuffer(activationTypeValue, sizeof(int8_t)); + m_allTensors.emplace_back(activationTypeTensor); + m_expectActivationTypeValue = mindspore::lite::ACTIVATION_TYPE_RELU6; +} + +void SubBuilderTest::InitTensor(const std::vector& inputsIndex, const std::vector& outputsIndex) +{ + std::vector paramsIndex = { 3 }; + std::vector inputDim = {3}; + std::vector OutputDim = {3}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +/** + * @tc.name: sub_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: sub_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: sub_build_003 + * @tc.desc:Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * 
@tc.name: sub_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_005, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3 }; + std::vector paramsIndex = { 4 }; + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_build_006 + * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = {}; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_build_007 + * @tc.desc: Provide param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_build_008 + * @tc.desc:Provide param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_008, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_build_009 + * @tc.desc: Provide activate type parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_009, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + + std::shared_ptr activationTypeTensor = TransToNNTensor(OH_NN_INT8, paramDim, nullptr, + OH_NN_SUB_ACTIVATIONTYPE); + activationTypeTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(activationTypeTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, 
m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_build_010 + * @tc.desc: Provide parameter is not scaler to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_010, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {1, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_build_011 + * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_011, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_SCALE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_build_012 + * @tc.desc: Provide invalid param value to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_012, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + + std::shared_ptr activationTypeTensor = TransToNNTensor(OH_NN_INT8, paramDim, nullptr, + OH_NN_SUB_ACTIVATIONTYPE); + int8_t* activationTypeValue = new (std::nothrow) int8_t[1]{-1}; + EXPECT_NE(nullptr, activationTypeValue); + activationTypeTensor->SetBuffer(activationTypeValue, sizeof(int8_t)); + m_allTensors.emplace_back(activationTypeTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: sub_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_get_primitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_NE(primitive, expectPrimitive); + + auto returnValue = mindspore::lite::MindIR_SubFusion_GetActivationType(primitive.get()); + EXPECT_EQ(returnValue, m_expectActivationTypeValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/tanh_builder_test.cpp 
b/test/unittest/ops/tanh_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..275ce20470157d7f7de1cc4fc70cd08996c17fd2 --- /dev/null +++ b/test/unittest/ops/tanh_builder_test.cpp @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/tanh_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class TanhBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + +protected: + TanhBuilder m_builder; +}; + +void TanhBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector inputDim = {1, 5, 1, 1}; + std::vector OutputDim = {1, 5, 1, 1}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +/** + * @tc.name: tanh_build_001 + * @tc.desc: Provide normal input, output to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: tanh_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + + InitTensor(inputsIndex, outputsIndex); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: tanh_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: tanh_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + + 
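// Two output indices are registered for the single-output Tanh op, so Build is expected to return OH_NN_INVALID_PARAMETER. +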
InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: tanh_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_build_005, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramsIndex = {}; + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: tanh_build_006 + * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector inputDim = {1, 5, 1, 1}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: tanh_build_007 + * @tc.desc: Provide a param to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramsIndex = { 4 }; + + m_paramsIndex = paramsIndex; + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: tanh_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: tanh_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_get_primitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + + InitTensor(inputsIndex, outputsIndex); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_NE(primitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/tile_builder_test.cpp b/test/unittest/ops/tile_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f6b9808ecd3deb35a4967db8230b13f21dc34f96 --- /dev/null +++ b/test/unittest/ops/tile_builder_test.cpp @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/tile_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class TileBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void CheckResult(); + +protected: + TileBuilder m_builder; +}; + +void TileBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector inputDim = {2, 2}; + std::vector OutputDim = {4, 4}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +void TileBuilderTest::CheckResult() +{ + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_NE(primitive, expectPrimitive); +} + +/** + * @tc.name: tile_build_001 + * @tc.desc: Provide normal input, output to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TileBuilderTest, tile_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: tile_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TileBuilderTest, tile_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + + InitTensor(inputsIndex, outputsIndex); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: tile_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TileBuilderTest, tile_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: tile_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TileBuilderTest, tile_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = 
m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: tile_build_005
+ * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TileBuilderTest, tile_build_005, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2 };
+    std::vector<uint32_t> paramsIndex = {};
+
+    OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: tile_build_006
+ * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TileBuilderTest, tile_build_006, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2 };
+    std::vector<int32_t> inputDim = {2, 2};
+
+    SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: tile_build_007
+ * @tc.desc: Provide a param to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TileBuilderTest, tile_build_007, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2 };
+    std::vector<uint32_t> paramsIndex = { 4 };
+
+    m_paramsIndex = paramsIndex;
+    InitTensor(inputsIndex, outputsIndex);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: tile_get_primitive_001
+ * @tc.desc: Verify the GetPrimitive function return nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(TileBuilderTest, tile_get_primitive_001, TestSize.Level0)
+{
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive };
+    EXPECT_EQ(primitive, expectPrimitive);
+}
+
+/**
+ * @tc.name: tile_getprimitive_002
+ * @tc.desc: Verify the normal params return behavior of the getprimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TileBuilderTest, tile_getprimitive_002, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2 };
+
+    InitTensor(inputsIndex, outputsIndex);
+
+    CheckResult();
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/test/unittest/ops/topk_builder_test.cpp b/test/unittest/ops/topk_builder_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8237e92558597271ea77792f9bb251c7ed3299e0
--- /dev/null
+++ b/test/unittest/ops/topk_builder_test.cpp
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "frameworks/native/ops/top_k_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class TopKBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void SaveSortedTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + TopKBuilder m_builder; + bool m_topkValue {false}; +}; + +void TopKBuilderTest::InitTensor(const std::vector& inputsIndex, const std::vector& outputsIndex) +{ + std::vector paramsIndex = { 4 }; + std::vector inputDim = {9}; + std::vector OutputDim = {3}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +void TopKBuilderTest::SaveSortedTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr topkTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool* topkValue = new (std::nothrow) bool[1]{true}; + EXPECT_NE(nullptr, topkValue); + topkTensor->SetBuffer(topkValue, sizeof(bool)); + m_allTensors.emplace_back(topkTensor); + m_topkValue = *topkValue; +} + +/** + * @tc.name: topk_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TopKBuilderTest, topk_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: topk_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TopKBuilderTest, topk_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: topk_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TopKBuilderTest, topk_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4, 5 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: topk_builder_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ 
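+// TopK produces two outputs (values and indices), so the three output indices used below
+// are expected to make Build fail with OH_NN_INVALID_PARAMETER.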
+HWTEST_F(TopKBuilderTest, topk_builder_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3, 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: topk_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TopKBuilderTest, topk_build_005, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3, 4 }; + std::vector paramsIndex = { 5 }; + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: topk_build_006 + * @tc.desc: Provide param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TopKBuilderTest, topk_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveSortedTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_TOP_K_SORTED); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: topk_build_007 + * @tc.desc: Provide param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TopKBuilderTest, topk_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveSortedTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TOP_K_SORTED); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: topk_build_008 + * @tc.desc: Provide sorted parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TopKBuilderTest, topk_build_008, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + + std::shared_ptr topkTensor = TransToNNTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED); + topkTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(topkTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: topk_build_009 + * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TopKBuilderTest, topk_build_009, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_SCALE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: topk_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ 
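+// Before Build has succeeded, GetPrimitive is expected to return the empty
+// LiteGraphTensorPtr { nullptr, DestroyLiteGraphPrimitive }.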
+HWTEST_F(TopKBuilderTest, topk_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: topk_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(TopKBuilderTest, topk_get_primitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3 }; + std::vector paramDim = {}; + InitTensor(inputsIndex, outputsIndex); + SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_NE(primitive, expectPrimitive); + + auto sortedReturn = mindspore::lite::MindIR_TopKFusion_GetSorted(primitive.get()); + EXPECT_EQ(sortedReturn, m_topkValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/transpose_builder_test.cpp b/test/unittest/ops/transpose_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a0a5095177b59552891c6276abe7418af98b8105 --- /dev/null +++ b/test/unittest/ops/transpose_builder_test.cpp @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/transpose_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class TransposeBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + +protected: + TransposeBuilder m_builder; +}; + +void TransposeBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector inputDim = {2, 3}; + std::vector OutputDim = {3, 2}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +/** + * @tc.name: transpose_build_001 + * @tc.desc: Provide normal input, output to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TransposeBuilderTest, transpose_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: transpose_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TransposeBuilderTest, transpose_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + + InitTensor(inputsIndex, outputsIndex); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: transpose_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TransposeBuilderTest, transpose_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: transpose_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TransposeBuilderTest, transpose_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: transpose_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TransposeBuilderTest, transpose_build_005, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramsIndex = {}; + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: transpose_build_006 + * @tc.desc: Provide empty output to verify the 
abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TransposeBuilderTest, transpose_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = {}; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: transpose_build_007 + * @tc.desc: Provide a param to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TransposeBuilderTest, transpose_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramsIndex = { 4 }; + + m_paramsIndex = paramsIndex; + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: transpose_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(TransposeBuilderTest, transpose_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: transpose_getprimitive_002 + * @tc.desc: Verify the normal return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(TransposeBuilderTest, transpose_getprimitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + + InitTensor(inputsIndex, outputsIndex); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_NE(primitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/unsqueeze_builder_test.cpp b/test/unittest/ops/unsqueeze_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..512b4d29c21c09df4bac8a2c64e0c866d41d9edb --- /dev/null +++ b/test/unittest/ops/unsqueeze_builder_test.cpp @@ -0,0 +1,301 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/unsqueeze_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class UnsqueezeBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + UnsqueezeBuilder m_builder; + std::vector m_expectAxisValue; +}; + +void UnsqueezeBuilderTest::SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr axisTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t[1]{1}; + axisTensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); + m_expectAxisValue.emplace_back(*axisValue); +} + +void UnsqueezeBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector paramsIndex = { 2 }; + std::vector inputDim = {1, 5, 1}; + std::vector OutputDim = {1, 1, 5, 1}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +/** + * @tc.name: unsqueeze_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: unsqueeze_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: unsqueeze_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: unsqueeze_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal 
behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: unsqueeze_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_005, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramsIndex = { 2 }; + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: unsqueeze_build_006 + * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = {}; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: unsqueeze_build_007 + * @tc.desc: Provide param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: unsqueeze_build_008 + * @tc.desc: Provide param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_008, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: unsqueeze_build_009 + * @tc.desc: Provide axis parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_009, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS); + axisTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: unsqueeze_build_010 + * @tc.desc: 
Provide an axis parameter that is not a scalar to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_010, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = { 1 };
+    std::vector<int32_t> paramDim = {1, 2};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: unsqueeze_build_011
+ * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_011, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = { 1 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SCALE_AXIS);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: unsqueeze_get_primitive_001
+ * @tc.desc: Verify the GetPrimitive function return nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnsqueezeBuilderTest, unsqueeze_get_primitive_001, TestSize.Level0)
+{
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive };
+    EXPECT_EQ(primitive, expectPrimitive);
+}
+
+/**
+ * @tc.name: unsqueeze_get_primitive_002
+ * @tc.desc: Verify the normal params return behavior of the getprimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnsqueezeBuilderTest, unsqueeze_get_primitive_002, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = { 1 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive };
+    EXPECT_NE(primitive, expectPrimitive);
+
+    auto axisReturn = mindspore::lite::MindIR_Unsqueeze_GetAxis(primitive.get());
+    auto axisReturnSize = axisReturn.size();
+    for (size_t i = 0; i < axisReturnSize; ++i) {
+        EXPECT_EQ(axisReturn[i], m_expectAxisValue[i]);
+    }
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
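Each of the *_get_primitive_002 cases above follows the same Build-then-GetPrimitive flow. The sketch below condenses that shared pattern for the Unsqueeze builder; it is illustrative only (the test name is hypothetical) and assumes the OpsTest fixture helpers and types shown in the files above (InitTensor, SaveAxisTensor, LiteGraphTensorPtr, DestroyLiteGraphPrimitive).

// Illustrative sketch only: the shared pattern behind the *_get_primitive_002 cases,
// written against the UnsqueezeBuilderTest fixture defined in unsqueeze_builder_test.cpp.
HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_then_get_primitive_sketch, TestSize.Level0)
{
    InitTensor({ 0 }, { 1 });                                        // register input/output tensors
    SaveAxisTensor(OH_NN_INT64, {}, nullptr, OH_NN_UNSQUEEZE_AXIS);  // scalar axis parameter

    // GetPrimitive only returns a usable primitive after a successful Build.
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));

    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive };
    EXPECT_NE(primitive, expectPrimitive);
}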