From f27f7d851840a21c0004d9a93aaeac6ad74b9ba0 Mon Sep 17 00:00:00 2001 From: wWX1227061 Date: Fri, 23 Feb 2024 15:56:53 +0800 Subject: [PATCH 1/9] add second phase ops Signed-off-by: wWX1227061 --- .../native/neural_network_core/validation.cpp | 2 +- .../native/neural_network_runtime/BUILD.gn | 14 + .../lite_graph_to_hdi_model_v2_1.cpp | 268 ++++++++++++++++++ .../ops/all_builder.cpp | 110 +++++++ .../neural_network_runtime/ops/all_builder.h | 48 ++++ .../ops/assert_builder.cpp | 110 +++++++ .../ops/assert_builder.h | 48 ++++ .../ops/cos_builder.cpp | 73 +++++ .../neural_network_runtime/ops/cos_builder.h | 42 +++ .../ops/log_builder.cpp | 73 +++++ .../neural_network_runtime/ops/log_builder.h | 42 +++ .../ops/logical_and_builder.cpp | 73 +++++ .../ops/logical_and_builder.h | 42 +++ .../ops/logical_not_builder.cpp | 73 +++++ .../ops/logical_not_builder.h | 42 +++ .../ops/logical_or_builder.cpp | 73 +++++ .../ops/logical_or_builder.h | 42 +++ .../ops/mod_builder.cpp | 73 +++++ .../neural_network_runtime/ops/mod_builder.h | 42 +++ .../ops/neg_builder.cpp | 73 +++++ .../neural_network_runtime/ops/neg_builder.h | 42 +++ .../ops/normalize_builder.cpp | 73 +++++ .../ops/normalize_builder.h | 42 +++ .../ops/pow_builder.cpp | 76 ++++- .../neural_network_runtime/ops/pow_builder.h | 8 + .../ops/reciprocal_builder.cpp | 73 +++++ .../ops/reciprocal_builder.h | 42 +++ .../ops/sin_builder.cpp | 73 +++++ .../neural_network_runtime/ops/sin_builder.h | 42 +++ .../ops/sparse_to_dense_builder.cpp | 73 +++++ .../ops/sparse_to_dense_builder.h | 42 +++ .../ops/unstack_builder.cpp | 29 +- .../ops/where_builder.cpp | 73 +++++ .../ops/where_builder.h | 42 +++ .../neural_network_runtime_type.h | 197 +++++++++++++ test/unittest/BUILD.gn | 4 +- test/unittest/ops/BUILD.gn | 16 +- test/unittest/ops/all_test.cpp | 250 ++++++++++++++++ test/unittest/ops/assert_test.cpp | 250 ++++++++++++++++ test/unittest/ops/cos_test.cpp | 158 +++++++++++ test/unittest/ops/instance_norm_test.cpp | 1 + 
test/unittest/ops/log_test.cpp | 158 +++++++++++ test/unittest/ops/logical_and_test.cpp | 158 +++++++++++ test/unittest/ops/logical_not_test.cpp | 158 +++++++++++ test/unittest/ops/logical_or_test.cpp | 158 +++++++++++ test/unittest/ops/mod_test.cpp | 173 +++++++++++ test/unittest/ops/neg_test.cpp | 158 +++++++++++ test/unittest/ops/normalize_test.cpp | 158 +++++++++++ test/unittest/ops/pow_builder_test.cpp | 169 ++++++++++- test/unittest/ops/reciprocal_test.cpp | 158 +++++++++++ test/unittest/ops/sin_test.cpp | 158 +++++++++++ test/unittest/ops/sparse_to_dense_test.cpp | 182 ++++++++++++ test/unittest/ops/unstack_test.cpp | 36 +-- test/unittest/ops/where_test.cpp | 158 +++++++++++ 54 files changed, 4891 insertions(+), 60 deletions(-) create mode 100644 frameworks/native/neural_network_runtime/ops/all_builder.cpp create mode 100644 frameworks/native/neural_network_runtime/ops/all_builder.h create mode 100644 frameworks/native/neural_network_runtime/ops/assert_builder.cpp create mode 100644 frameworks/native/neural_network_runtime/ops/assert_builder.h create mode 100644 frameworks/native/neural_network_runtime/ops/cos_builder.cpp create mode 100644 frameworks/native/neural_network_runtime/ops/cos_builder.h create mode 100644 frameworks/native/neural_network_runtime/ops/log_builder.cpp create mode 100644 frameworks/native/neural_network_runtime/ops/log_builder.h create mode 100644 frameworks/native/neural_network_runtime/ops/logical_and_builder.cpp create mode 100644 frameworks/native/neural_network_runtime/ops/logical_and_builder.h create mode 100644 frameworks/native/neural_network_runtime/ops/logical_not_builder.cpp create mode 100644 frameworks/native/neural_network_runtime/ops/logical_not_builder.h create mode 100644 frameworks/native/neural_network_runtime/ops/logical_or_builder.cpp create mode 100644 frameworks/native/neural_network_runtime/ops/logical_or_builder.h create mode 100644 frameworks/native/neural_network_runtime/ops/mod_builder.cpp create mode 
100644 frameworks/native/neural_network_runtime/ops/mod_builder.h create mode 100644 frameworks/native/neural_network_runtime/ops/neg_builder.cpp create mode 100644 frameworks/native/neural_network_runtime/ops/neg_builder.h create mode 100644 frameworks/native/neural_network_runtime/ops/normalize_builder.cpp create mode 100644 frameworks/native/neural_network_runtime/ops/normalize_builder.h create mode 100644 frameworks/native/neural_network_runtime/ops/reciprocal_builder.cpp create mode 100644 frameworks/native/neural_network_runtime/ops/reciprocal_builder.h create mode 100644 frameworks/native/neural_network_runtime/ops/sin_builder.cpp create mode 100644 frameworks/native/neural_network_runtime/ops/sin_builder.h create mode 100644 frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.cpp create mode 100644 frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.h create mode 100644 frameworks/native/neural_network_runtime/ops/where_builder.cpp create mode 100644 frameworks/native/neural_network_runtime/ops/where_builder.h create mode 100644 test/unittest/ops/all_test.cpp create mode 100644 test/unittest/ops/assert_test.cpp create mode 100644 test/unittest/ops/cos_test.cpp create mode 100644 test/unittest/ops/log_test.cpp create mode 100644 test/unittest/ops/logical_and_test.cpp create mode 100644 test/unittest/ops/logical_not_test.cpp create mode 100644 test/unittest/ops/logical_or_test.cpp create mode 100644 test/unittest/ops/mod_test.cpp create mode 100644 test/unittest/ops/neg_test.cpp create mode 100644 test/unittest/ops/normalize_test.cpp create mode 100644 test/unittest/ops/reciprocal_test.cpp create mode 100644 test/unittest/ops/sin_test.cpp create mode 100644 test/unittest/ops/sparse_to_dense_test.cpp create mode 100644 test/unittest/ops/where_test.cpp diff --git a/frameworks/native/neural_network_core/validation.cpp b/frameworks/native/neural_network_core/validation.cpp index 0374811..110d4ee 100644 --- 
a/frameworks/native/neural_network_core/validation.cpp +++ b/frameworks/native/neural_network_core/validation.cpp @@ -60,7 +60,7 @@ bool ValidateFuseType(OH_NN_FuseType fuseType) bool ValidateTensorType(OH_NN_TensorType nnTensorType) { - if ((nnTensorType >= OH_NN_TENSOR) && (nnTensorType <= OH_NN_CLIP_MIN)) { + if ((nnTensorType >= OH_NN_TENSOR) && (nnTensorType <= OH_NN_POW_SHIFT)) { return true; } return false; diff --git a/frameworks/native/neural_network_runtime/BUILD.gn b/frameworks/native/neural_network_runtime/BUILD.gn index 4561310..36c71d6 100644 --- a/frameworks/native/neural_network_runtime/BUILD.gn +++ b/frameworks/native/neural_network_runtime/BUILD.gn @@ -53,7 +53,9 @@ nnrt_sources = [ ops_sources = [ "ops/abs_builder.cpp", "ops/add_builder.cpp", + "ops/all_builder.cpp", "ops/argmax_builder.cpp", + "ops/assert_builder.cpp", "ops/avgpool_builder.cpp", "ops/batch_to_space_nd_builder.cpp", "ops/batchnorm_builder.cpp", @@ -65,6 +67,7 @@ ops_sources = [ "ops/constant_of_shape_builder.cpp", "ops/conv2d_builder.cpp", "ops/conv2d_transpose_builder.cpp", + "ops/cos_builder.cpp", "ops/depth_to_space_builder.cpp", "ops/depthwise_conv2d_native_builder.cpp", "ops/div_builder.cpp", @@ -86,11 +89,18 @@ ops_sources = [ "ops/leaky_relu_builder.cpp", "ops/less_builder.cpp", "ops/lessequal_builder.cpp", + "ops/log_builder.cpp", + "ops/logical_and_builder.cpp", + "ops/logical_not_builder.cpp", + "ops/logical_or_builder.cpp", "ops/lstm_builder.cpp", "ops/matmul_builder.cpp", "ops/maximum_builder.cpp", "ops/maxpool_builder.cpp", + "ops/mod_builder.cpp", "ops/mul_builder.cpp", + "ops/neg_builder.cpp", + "ops/normalize_builder.cpp", "ops/notequal_builder.cpp", "ops/onehot_builder.cpp", "ops/ops_validation.cpp", @@ -101,6 +111,7 @@ ops_sources = [ "ops/quant_dtype_cast_builder.cpp", "ops/range_builder.cpp", "ops/real_div_builder.cpp", + "ops/reciprocal_builder.cpp", "ops/reduceall_builder.cpp", "ops/reducemean_builder.cpp", "ops/reduceprod_builder.cpp", @@ -113,9 +124,11 @@ 
ops_sources = [ "ops/select_builder.cpp", "ops/shape_builder.cpp", "ops/sigmoid_builder.cpp", + "ops/sin_builder.cpp", "ops/slice_builder.cpp", "ops/softmax_builder.cpp", "ops/space_to_batch_nd_builder.cpp", + "ops/sparse_to_dense_builder.cpp", "ops/split_builder.cpp", "ops/sqrt_builder.cpp", "ops/square_builder.cpp", @@ -130,6 +143,7 @@ ops_sources = [ "ops/transpose_builder.cpp", "ops/unsqueeze_builder.cpp", "ops/unstack_builder.cpp", + "ops/where_builder.cpp", ] ohos_shared_library("libneural_network_runtime") { diff --git a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp index a8ed752..f9ae54c 100644 --- a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp +++ b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp @@ -85,6 +85,23 @@ std::vector ConvertAddFusion(PrimitivePtr primitive) return ret; } +std::vector ConvertAll(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertAll v2_1 failed, primitive is nullptr."); + return {}; + } + + All all{}; + all.keepDims = mindspore::lite::MindIR_All_GetKeepDims(primitive); + + OHOS::MessageParcel data; + (void)AllBlockMarshalling(data, all); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertArgMaxFusion(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -105,6 +122,23 @@ std::vector ConvertArgMaxFusion(PrimitivePtr primitive) return ret; } +std::vector ConvertAssert(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertAssert v2_1 failed, primitive is nullptr."); + return {}; + } + + Assert assert{}; + assert.summarize = mindspore::lite::MindIR_Assert_GetSummarize(primitive); + + OHOS::MessageParcel data; + (void)AssertBlockMarshalling(data, assert); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + 
data.GetDataSize()); + return ret; +} + std::vector ConvertAvgPoolFusion(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -283,6 +317,22 @@ std::vector ConvertConv2dTransposeFusion(PrimitivePtr primitive) return ret; } +std::vector ConvertCos(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertCos v2_1 failed, primitive is nullptr."); + return {}; + } + + Cos cos{}; + + OHOS::MessageParcel data; + (void)CosBlockMarshalling(data, cos); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertConstantOfShape(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -601,6 +651,70 @@ std::vector ConvertLessEqual(PrimitivePtr primitive) return ret; } +std::vector ConvertLog(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertLog v2_1 failed, primitive is nullptr."); + return {}; + } + + Log log{}; + + OHOS::MessageParcel data; + (void)LogBlockMarshalling(data, log); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + +std::vector ConvertLogicalAnd(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertLogicalAnd v2_1 failed, primitive is nullptr."); + return {}; + } + + LogicalAnd logicalAnd{}; + + OHOS::MessageParcel data; + (void)LogicalAndBlockMarshalling(data, logicalAnd); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + +std::vector ConvertLogicalNot(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertLogicalNot v2_1 failed, primitive is nullptr."); + return {}; + } + + LogicalNot logicalNot{}; + + OHOS::MessageParcel data; + (void)LogicalNotBlockMarshalling(data, logicalNot); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + +std::vector 
ConvertLogicalOr(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertLogicalOr v2_1 failed, primitive is nullptr."); + return {}; + } + + LogicalOr logicalOr{}; + + OHOS::MessageParcel data; + (void)LogicalOrBlockMarshalling(data, logicalOr); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertLstm(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -687,6 +801,22 @@ std::vector ConvertMaxPoolFusion(PrimitivePtr primitive) return ret; } +std::vector ConvertMod(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertMod v2_1 failed, primitive is nullptr."); + return {}; + } + + Mod mod{}; + + OHOS::MessageParcel data; + (void)ModBlockMarshalling(data, mod); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertMulFusion(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -704,6 +834,38 @@ std::vector ConvertMulFusion(PrimitivePtr primitive) return ret; } +std::vector ConvertNeg(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertNeg v2_1 failed, primitive is nullptr."); + return {}; + } + + Neg neg{}; + + OHOS::MessageParcel data; + (void)NegBlockMarshalling(data, neg); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + +std::vector ConvertCustomNormalize(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertCustomNormalize v2_1 failed, primitive is nullptr."); + return {}; + } + + CustomNormalize customNormalize{}; + + OHOS::MessageParcel data; + (void)CustomNormalizeBlockMarshalling(data, customNormalize); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertNotEqual(PrimitivePtr primitive) { if (primitive == nullptr) { @@ 
-824,6 +986,22 @@ std::vector ConvertRange(PrimitivePtr primitive) return ret; } +std::vector ConvertReciprocal(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertReciprocal v2_1 failed, primitive is nullptr."); + return {}; + } + + Reciprocal reciprocal{}; + + OHOS::MessageParcel data; + (void)ReciprocalBlockMarshalling(data, reciprocal); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertRealDiv(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -946,6 +1124,23 @@ std::vector ConvertShape(PrimitivePtr primitive) reinterpret_cast(data.GetData()) + data.GetDataSize()); return ret; } + +std::vector ConvertSin(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertSin v2_1 failed, primitive is nullptr."); + return {}; + } + + Sin sin{}; + + OHOS::MessageParcel data; + (void)SinBlockMarshalling(data, sin); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertSliceFusion(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -995,6 +1190,21 @@ std::vector ConvertSpaceToBatchND(PrimitivePtr primitive) return ret; } +std::vector ConvertSparseToDense(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertSparseToDense v2_1 failed, primitive is nullptr."); + return {}; + } + + SparseToDense sparseToDense{}; + OHOS::MessageParcel data; + (void)SparseToDenseBlockMarshalling(data, sparseToDense); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertSplit(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -1208,6 +1418,22 @@ std::vector ConvertUnstack(PrimitivePtr primitive) return ret; } +std::vector ConvertWhere(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertWhere v2_1 failed, primitive is nullptr."); 
+ return {}; + } + + Where where{}; + + OHOS::MessageParcel data; + (void)WhereBlockMarshalling(data, where); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertSelect(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -1252,9 +1478,15 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ADD_FUSION: return ConvertAddFusion(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ALL: + return ConvertAll(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ARGMAX_FUSION: return ConvertArgMaxFusion(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ASSERT: + return ConvertAssert(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_AVGPOOL_FUSION: return ConvertAvgPoolFusion(primitive); break; @@ -1282,6 +1514,9 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CONV2D_TRANSPOSE_FUSION: return ConvertConv2dTransposeFusion(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_COS: + return ConvertCos(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CONSTANT_OF_SHAPE: return ConvertConstantOfShape(primitive); break; @@ -1339,6 +1574,18 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LESS_EQUAL: return ConvertLessEqual(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LOG: + return ConvertLog(primitive); + break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LOGICAL_AND: + return ConvertLogicalAnd(primitive); + break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LOGICAL_NOT: + return ConvertLogicalNot(primitive); + break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LOGICAL_OR: + return ConvertLogicalOr(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LSTM: return ConvertLstm(primitive); break; @@ -1351,9 +1598,18 @@ std::vector 
Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MAX_POOL_FUSION: return ConvertMaxPoolFusion(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MOD: + return ConvertMod(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MUL_FUSION: return ConvertMulFusion(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_NEG: + return ConvertNeg(primitive); + break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CUSTOM_NORMALIZE: + return ConvertCustomNormalize(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_NOT_EQUAL: return ConvertNotEqual(primitive); break; @@ -1375,6 +1631,9 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RANGE: return ConvertRange(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RECIPROCAL: + return ConvertReciprocal(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_REAL_DIV: return ConvertRealDiv(primitive); break; @@ -1396,6 +1655,9 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SHAPE: return ConvertShape(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SIN: + return ConvertSin(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SLICE_FUSION: return ConvertSliceFusion(primitive); break; @@ -1405,6 +1667,9 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SPACE_TO_BATCH_ND: return ConvertSpaceToBatchND(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SPARSE_TO_DENSE: + return ConvertSparseToDense(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SPLIT: return ConvertSplit(primitive); break; @@ -1444,6 +1709,9 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_UNSTACK: return ConvertUnstack(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_WHERE: + return 
ConvertWhere(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SELECT: return ConvertSelect(primitive); break; diff --git a/frameworks/native/neural_network_runtime/ops/all_builder.cpp b/frameworks/native/neural_network_runtime/ops/all_builder.cpp new file mode 100644 index 0000000..aa53e2f --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/all_builder.cpp @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "all_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int SCALAR_LENGTH = 1; +static const std::string OP_NAME = "All"; + +AllBuilder::AllBuilder() {} + +AllBuilder::~AllBuilder() {} + +OH_NN_ReturnCode AllBuilder::SetKeepDims(std::shared_ptr& tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[All] The keep_dims should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[All] The keep_dims should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[All] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_keepDims = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode AllBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[All] Build failed, the all operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[All] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + OH_NN_ReturnCode returnCode; + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_ALL_KEEP_DIMS: + returnCode = SetKeepDims(tensor); + break; + default: + LOGE("[All] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[All] Build failed, passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr AllBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[All] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_All_CreatePrimitive(m_keepDims); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(AllBuilder, OH_NN_OPS_ALL); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/all_builder.h b/frameworks/native/neural_network_runtime/ops/all_builder.h new file mode 100644 index 0000000..e43ff1b --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/all_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_ALL_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_ALL_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class AllBuilder : public OpsBuilder { +public: + AllBuilder(); + ~AllBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetKeepDims(std::shared_ptr& tensor); + +private: + int64_t m_keepDims {0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_ALL_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/assert_builder.cpp b/frameworks/native/neural_network_runtime/ops/assert_builder.cpp new file mode 100644 index 0000000..d11d7fb --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/assert_builder.cpp @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "assert_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int SCALAR_LENGTH = 1; +static const std::string OP_NAME = "Assert"; + +AssertBuilder::AssertBuilder() {} + +AssertBuilder::~AssertBuilder() {} + +OH_NN_ReturnCode AssertBuilder::SetSummarize(std::shared_ptr& tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Assert] The summarize should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[Assert] The summarize should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Assert] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_summarize = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode AssertBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Assert] Build failed, the assert operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Assert] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + OH_NN_ReturnCode returnCode; + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_ASSERT_SUMMARIZE: + returnCode = SetSummarize(tensor); + break; + default: + LOGE("[Assert] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Assert] Build failed, passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr AssertBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Assert] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Assert_CreatePrimitive(m_summarize); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(AssertBuilder, OH_NN_OPS_ASSERT); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/assert_builder.h b/frameworks/native/neural_network_runtime/ops/assert_builder.h new file mode 100644 index 0000000..7f4189b --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/assert_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_ASSERT_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_ASSERT_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class AssertBuilder : public OpsBuilder { +public: + AssertBuilder(); + ~AssertBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetSummarize(std::shared_ptr& tensor); + +private: + int64_t m_summarize {0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_ASSERT_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/cos_builder.cpp b/frameworks/native/neural_network_runtime/ops/cos_builder.cpp new file mode 100644 index 0000000..86b0ca9 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/cos_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "cos_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Cos"; + +CosBuilder::CosBuilder() {} + +CosBuilder::~CosBuilder() {} + +OH_NN_ReturnCode CosBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Cos] Build failed, the cos operation has been build. cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Cos] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[Cos] Build failed, the cos expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr CosBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Cos] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Cos_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(CosBuilder, OH_NN_OPS_COS); +} // namespace Ops +} // namespace 
NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/cos_builder.h b/frameworks/native/neural_network_runtime/ops/cos_builder.h new file mode 100644 index 0000000..a2e4165 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/cos_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_COS_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_COS_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class CosBuilder : public OpsBuilder { +public: + CosBuilder(); + ~CosBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_COS_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/log_builder.cpp b/frameworks/native/neural_network_runtime/ops/log_builder.cpp new file mode 100644 index 0000000..b926526 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/log_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "log_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Log"; + +LogBuilder::LogBuilder() {} + +LogBuilder::~LogBuilder() {} + +OH_NN_ReturnCode LogBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Log] Build failed, the log operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Log] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[Log] Build failed, the log expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr LogBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Log] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Log_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(LogBuilder, OH_NN_OPS_LOG); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/log_builder.h b/frameworks/native/neural_network_runtime/ops/log_builder.h new file mode 100644 index 0000000..a7be75a --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/log_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_LOG_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_LOG_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class LogBuilder : public OpsBuilder { +public: + LogBuilder(); + ~LogBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_LOG_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/logical_and_builder.cpp b/frameworks/native/neural_network_runtime/ops/logical_and_builder.cpp new file mode 100644 index 0000000..6ce24c7 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/logical_and_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "logical_and_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "LogicalAnd"; + +LogicalAndBuilder::LogicalAndBuilder() {} + +LogicalAndBuilder::~LogicalAndBuilder() {} + +OH_NN_ReturnCode LogicalAndBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[LogicalAnd] Build failed, the logicalAnd operation has been build. cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[LogicalAnd] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[LogicalAnd] Build failed, the logicalAnd expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr LogicalAndBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[LogicalAnd] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_LogicalAnd_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(LogicalAndBuilder, OH_NN_OPS_LOGICAL_AND); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/logical_and_builder.h b/frameworks/native/neural_network_runtime/ops/logical_and_builder.h new file mode 100644 index 0000000..c376b68 --- /dev/null +++ 
b/frameworks/native/neural_network_runtime/ops/logical_and_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_LOGICAL_AND_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_LOGICAL_AND_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class LogicalAndBuilder : public OpsBuilder { +public: + LogicalAndBuilder(); + ~LogicalAndBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_LOGICAL_AND_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/logical_not_builder.cpp b/frameworks/native/neural_network_runtime/ops/logical_not_builder.cpp new file mode 100644 index 0000000..05b4aed --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/logical_not_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "logical_not_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "LogicalNot"; + +LogicalNotBuilder::LogicalNotBuilder() {} + +LogicalNotBuilder::~LogicalNotBuilder() {} + +OH_NN_ReturnCode LogicalNotBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[LogicalNot] Build failed, the logicalNot operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[LogicalNot] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[LogicalNot] Build failed, the logicalNot expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr LogicalNotBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[LogicalNot] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_LogicalNot_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(LogicalNotBuilder, OH_NN_OPS_LOGICAL_NOT); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/logical_not_builder.h b/frameworks/native/neural_network_runtime/ops/logical_not_builder.h new file mode 100644 index 0000000..0703101 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/logical_not_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_LOGICAL_NOT_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_LOGICAL_NOT_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class LogicalNotBuilder : public OpsBuilder { +public: + LogicalNotBuilder(); + ~LogicalNotBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_LOGICAL_NOT_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/logical_or_builder.cpp b/frameworks/native/neural_network_runtime/ops/logical_or_builder.cpp new file mode 100644 index 0000000..3f1d5c7 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/logical_or_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "logical_or_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "LogicalOr"; + +LogicalOrBuilder::LogicalOrBuilder() {} + +LogicalOrBuilder::~LogicalOrBuilder() {} + +OH_NN_ReturnCode LogicalOrBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[LogicalOr] Build failed, the logicalOr operation has been build. cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[LogicalOr] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[LogicalOr] Build failed, the logicalOr expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr LogicalOrBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[LogicalOr] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_LogicalOr_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(LogicalOrBuilder, OH_NN_OPS_LOGICAL_OR); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/logical_or_builder.h b/frameworks/native/neural_network_runtime/ops/logical_or_builder.h new file mode 100644 index 0000000..ac21309 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/logical_or_builder.h @@ 
-0,0 +1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_LOGICAL_OR_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_LOGICAL_OR_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class LogicalOrBuilder : public OpsBuilder { +public: + LogicalOrBuilder(); + ~LogicalOrBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_LOGICAL_OR_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/mod_builder.cpp b/frameworks/native/neural_network_runtime/ops/mod_builder.cpp new file mode 100644 index 0000000..61bd023 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/mod_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mod_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Mod"; + +ModBuilder::ModBuilder() {} + +ModBuilder::~ModBuilder() {} + +OH_NN_ReturnCode ModBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Mod] Build failed, the mod operation has been build. cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Mod] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[Mod] Build failed, the mod expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ModBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Mod] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Mod_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(ModBuilder, OH_NN_OPS_MOD); +} // namespace Ops +} // namespace 
NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/mod_builder.h b/frameworks/native/neural_network_runtime/ops/mod_builder.h new file mode 100644 index 0000000..e66658a --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/mod_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_MOD_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_MOD_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ModBuilder : public OpsBuilder { +public: + ModBuilder(); + ~ModBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_MOD_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/neg_builder.cpp b/frameworks/native/neural_network_runtime/ops/neg_builder.cpp new file mode 100644 index 0000000..ae6834b --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/neg_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "neg_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Neg"; + +NegBuilder::NegBuilder() {} + +NegBuilder::~NegBuilder() {} + +OH_NN_ReturnCode NegBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Neg] Build failed, the neg operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Neg] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[Neg] Build failed, the neg expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr NegBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Neg] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Neg_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(NegBuilder, OH_NN_OPS_NEG); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/neg_builder.h b/frameworks/native/neural_network_runtime/ops/neg_builder.h new file mode 100644 index 0000000..579dfe9 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/neg_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_NEG_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_NEG_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class NegBuilder : public OpsBuilder { +public: + NegBuilder(); + ~NegBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_NEG_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/normalize_builder.cpp b/frameworks/native/neural_network_runtime/ops/normalize_builder.cpp new file mode 100644 index 0000000..4476c27 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/normalize_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "normalize_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Normalize"; + +NormalizeBuilder::NormalizeBuilder() {} + +NormalizeBuilder::~NormalizeBuilder() {} + +OH_NN_ReturnCode NormalizeBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Normalize] Build failed, the normalize operation has been build. cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Normalize] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[Normalize] Build failed, the normalize expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr NormalizeBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Normalize] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_CustomNormalize_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(NormalizeBuilder, OH_NN_OPS_NORMALIZE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/normalize_builder.h b/frameworks/native/neural_network_runtime/ops/normalize_builder.h new file mode 100644 index 0000000..53d467c --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/normalize_builder.h @@ 
-0,0 +1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_NORMALIZE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_NORMALIZE_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class NormalizeBuilder : public OpsBuilder { +public: + NormalizeBuilder(); + ~NormalizeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_NORMALIZE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/pow_builder.cpp b/frameworks/native/neural_network_runtime/ops/pow_builder.cpp index e0188a1..774119c 100644 --- a/frameworks/native/neural_network_runtime/ops/pow_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/pow_builder.cpp @@ -24,12 +24,57 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int SCALAR_LENGTH = 1; static const std::string OP_NAME = "Pow"; PowBuilder::PowBuilder() {} PowBuilder::~PowBuilder() {} +OH_NN_ReturnCode 
PowBuilder::SetScale(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[Pow] The scale should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[Pow] The scale should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Pow] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_scale = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode PowBuilder::SetShift(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[Pow] The shift should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[Pow] The shift should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Pow] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_shift = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + OH_NN_ReturnCode PowBuilder::Build(const std::vector& paramsIndex, const std::vector& inputsIndex, const std::vector& outputsIndex, @@ -46,15 +91,31 @@ OH_NN_ReturnCode PowBuilder::Build(const std::vector& paramsIndex, return returnCode; } - if (!paramsIndex.empty()) { - LOGW("[Pow] Build failed, pow expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } - m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; SetQuantType(outputsIndex, allTensors); + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_POW_SCALE: + returnCode = SetScale(tensor); + break; + case OH_NN_POW_SHIFT: + returnCode = SetShift(tensor); + break; + default: + LOGE("[Pow] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } 
+ + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Pow] Build failed, passed invalid param."); + return returnCode; + } + } m_name = OP_NAME; m_isBuild = true; @@ -68,10 +129,7 @@ LiteGraphPrimitvePtr PowBuilder::GetPrimitive() return {nullptr, DestroyLiteGraphPrimitive}; } - float scale{1.0}; - float shift{0.0}; - - void* primitive = mindspore::lite::MindIR_PowFusion_CreatePrimitive(scale, shift); + void* primitive = mindspore::lite::MindIR_PowFusion_CreatePrimitive(m_scale, m_shift); LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); return graphPrimitivePtr; } diff --git a/frameworks/native/neural_network_runtime/ops/pow_builder.h b/frameworks/native/neural_network_runtime/ops/pow_builder.h index e709c06..2b58d86 100644 --- a/frameworks/native/neural_network_runtime/ops/pow_builder.h +++ b/frameworks/native/neural_network_runtime/ops/pow_builder.h @@ -31,6 +31,14 @@ public: const std::vector>& allTensors) override; LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetScale(std::shared_ptr tensor); + OH_NN_ReturnCode SetShift(std::shared_ptr tensor); + +private: + float m_scale {1.0f}; + float m_shift {0.0f}; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reciprocal_builder.cpp b/frameworks/native/neural_network_runtime/ops/reciprocal_builder.cpp new file mode 100644 index 0000000..44f7c74 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/reciprocal_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "reciprocal_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Reciprocal"; + +ReciprocalBuilder::ReciprocalBuilder() {} + +ReciprocalBuilder::~ReciprocalBuilder() {} + +OH_NN_ReturnCode ReciprocalBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Reciprocal] Build failed, the reciprocal operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Reciprocal] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[Reciprocal] Build failed, the reciprocal expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ReciprocalBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Reciprocal] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Reciprocal_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(ReciprocalBuilder, OH_NN_OPS_RECIPROCAL); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/reciprocal_builder.h b/frameworks/native/neural_network_runtime/ops/reciprocal_builder.h new file mode 100644 index 0000000..8f3fc12 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/reciprocal_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_RECIPROCAL_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_RECIPROCAL_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ReciprocalBuilder : public OpsBuilder { +public: + ReciprocalBuilder(); + ~ReciprocalBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_RECIPROCAL_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/sin_builder.cpp b/frameworks/native/neural_network_runtime/ops/sin_builder.cpp new file mode 100644 index 0000000..589a822 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/sin_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "sin_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Sin"; + +SinBuilder::SinBuilder() {} + +SinBuilder::~SinBuilder() {} + +OH_NN_ReturnCode SinBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Sin] Build failed, the sin operation has been build. cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Sin] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[Sin] Build failed, the sin expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr SinBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Sin] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Sin_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(SinBuilder, OH_NN_OPS_SIN); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/sin_builder.h b/frameworks/native/neural_network_runtime/ops/sin_builder.h new file mode 100644 index 0000000..da69c0c --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/sin_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_SIN_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SIN_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SinBuilder : public OpsBuilder { +public: + SinBuilder(); + ~SinBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SIN_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.cpp b/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.cpp new file mode 100644 index 0000000..63c0389 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "sparse_to_dense_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 3; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "SparseToDense"; + +SparseToDenseBuilder::SparseToDenseBuilder() {} + +SparseToDenseBuilder::~SparseToDenseBuilder() {} + +OH_NN_ReturnCode SparseToDenseBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[SparseToDense] Build failed, the sparseToDense operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[SparseToDense] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[SparseToDense] Build failed, the sparseToDense expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr SparseToDenseBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SparseToDense] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_SparseToDense_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(SparseToDenseBuilder, OH_NN_OPS_SPARSE_TO_DENSE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.h b/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.h new file mode 100644 index 0000000..fb13a9a --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_SPARSE_TO_DENSE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SPARSE_TO_DENSE_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SparseToDenseBuilder : public OpsBuilder { +public: + SparseToDenseBuilder(); + ~SparseToDenseBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SPARSE_TO_DENSE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp b/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp index aede5d9..e2ee7a2 100755 --- a/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp @@ -19,7 +19,7 @@ namespace OHOS { namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; -static const int OUTPUT_NUM = 1; +static const int OUTPUT_MIN_NUM = 1; static const int SCALAR_LENGTH = 1; static const std::string OP_NAME = "Unstack"; @@ -59,10 +59,29 @@ OH_NN_ReturnCode UnstackBuilder::Build(const std::vector& paramsIndex, return OH_NN_OPERATION_FORBIDDEN; } - auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); - if (ret != OH_NN_SUCCESS) { - LOGE("[Unstack] Build failed, passed invalid input or output index."); - return ret; + if (inputsIndex.size() != INPUT_NUM) { + LOGE("[Unstack] The number of index of inputs don't equal to %d.", INPUT_NUM); + return OH_NN_INVALID_PARAMETER; + } + + if (outputsIndex.size() 
< OUTPUT_MIN_NUM) { + LOGE("[Unstack] The number of index of outputs don't larger than %d.", OUTPUT_MIN_NUM); + return OH_NN_INVALID_PARAMETER; + } + + size_t allTensorsSize = allTensors.size(); + for (auto index : inputsIndex) { + if (index >= allTensorsSize) { + LOGE("The index of inputs is out of range."); + return OH_NN_INVALID_PARAMETER; + } + } + + for (auto index : outputsIndex) { + if (index >= allTensorsSize) { + LOGE("The index of outputs is out of range."); + return OH_NN_INVALID_PARAMETER; + } } m_inputsIndex = inputsIndex; diff --git a/frameworks/native/neural_network_runtime/ops/where_builder.cpp b/frameworks/native/neural_network_runtime/ops/where_builder.cpp new file mode 100644 index 0000000..4c2fe01 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/where_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "where_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 3; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Where"; + +WhereBuilder::WhereBuilder() {} + +WhereBuilder::~WhereBuilder() {} + +OH_NN_ReturnCode WhereBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Where] Build failed, the where operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Where] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[Where] Build failed, the where expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr WhereBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Where] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Where_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(WhereBuilder, OH_NN_OPS_WHERE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/where_builder.h b/frameworks/native/neural_network_runtime/ops/where_builder.h new file mode 100644 index 0000000..befaf4b --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/where_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_WHERE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_WHERE_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class WhereBuilder : public OpsBuilder { +public: + WhereBuilder(); + ~WhereBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_WHERE_BUILDER_H \ No newline at end of file diff --git a/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h b/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h index 0302284..d29a804 100644 --- a/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h +++ b/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h @@ -2001,6 +2001,191 @@ typedef enum { * * output: n-dimensional tensor., with the same data type and shape as the input tensor. */ OH_NN_OPS_CLIP = 77, + + /** + * Determine whether all elements in a given tensor are non-zero. It returns a boolean tensor + * where each element is 'True' if corresponding element in the input tensor is non-zero, and 'False' otherwise. + * + * Inputs: + * * input: n-dimensional tensor of shape (N,*), + * where * indicates any number of additional dimensions. + * * axis: scalar or tensor, indicates the dimension to be computed. + * + * Parameters: + * * keep_dims: Whether to keep dimension info. + * + * Outputs: + * * output: A boolean tensor. Each element is 'True' if all corresponding elements + * of the input tensor along the given axis are non-zero, and 'False' otherwise. + */ + OH_NN_OPS_ALL = 78, + + /** + * Asserts that the given condition is true. + * If condition evaluates to false, print the list of tensors in data.
+ * Summarize determines how many entries of the tensors to print. + * + * Inputs: + * * data: The tensors to print out when condition is false. + * * condition: The condition to evaluate. + * + * Parameters: + * * maxsummarize: Print this many entries of each tensor. + * + * Outputs: + * * output: Result of the assert operation. + */ + OH_NN_OPS_ASSERT = 79, + + /** + * Calculates the cosine of the given input tensor, element-wise. + * + * Inputs: + * * input: n-dimensional tensor. + * + * Outputs: + * * output: n-dimensional tensor. The cosine of the input tensor computed element-wise. + */ + OH_NN_OPS_COS = 80, + + /** + * Calculates the result of natural logarithm of the input. + * + * Inputs: + * * input: n-dimensional tensor. The value must be greater than 0. + * + * Outputs: + * * output: n-dimensional tensor with the same shape as the input tensor. + */ + OH_NN_OPS_LOG = 81, + + /** + * Calculates the truth value of input0 and input1 element-wise. + * + * Inputs: + * * input0: Tensor of type boolean or convert to boolean implicitly. + * * input1: Tensor of type boolean or convert to boolean implicitly. + * + * Outputs: + * * output: A tensor of type bool with the shape that x1 and x2 broadcast to. + */ + OH_NN_OPS_LOGICAL_AND = 82, + + /** + * Calculates the truth value of NOT x element-wise. + * + * Inputs: + * * input: Tensor of type boolean or convert to boolean implicitly. + * + * Outputs: + * * output: A tensor of type bool with the shape of input. + */ + OH_NN_OPS_LOGICAL_NOT = 83, + + /** + * Computes the remainder of dividing the first input tensor by the second input tensor element-wise. + * Inputs of x and y comply with the implicit type conversion rules to make the data types consistent. + * The inputs must be two tensors or one tensor and one scalar. When the inputs are two tensors, + * both dtypes cannot be bool, and the shapes of them could be broadcast. + * When the inputs are one tensor and one scalar, the scalar could only be a constant.
+ * + * Inputs: + * * input0: A number, a bool or a tensor whose data type is number. + * * input1: if input0 is a tensor, input1 could be a number, a bool or a tensor whose data type is number. + * If input0 is a number or a bool, input1 must be a tensor whose data type is number. + * + * Outputs: + * * output: The shape is the same shape as the broadcast shape. The data type is the type with + * the higher precision or the highest data type between the two inputs. + */ + OH_NN_OPS_MOD = 84, + + /** + * Returns a tensor with negative values of the input tensor element-wise. + * + * Inputs: + * * input: A tensor of the int or float type. + * + * Outputs: + * * output: A tensor with the same shape as the input tensor. + */ + OH_NN_OPS_NEG = 85, + + /** + * Calculate reciprocal of a tensor element-wise. + * + * Inputs: + * * input: Input tensor. + * + * Outputs: + * * output: A tensor with the same shape as the input tensor. + */ + OH_NN_OPS_RECIPROCAL = 86, + + /** + * Calculate sine of the input element-wise. + * + * Inputs: + * * input: Input tensor. + * + * Outputs: + * * output: A tensor with the same data type and shape as the input tensor. + */ + OH_NN_OPS_SIN = 87, + + /** + * Selects elements from x1 or x2 based on condition and returns a tensor. + * + * Inputs: + * * input_cond: n-dimensional tensor or scalar. + * The condition tensor, decides which element is chosen. + * * input1: n-dimensional tensor. If condition is rank 1, + * x1 may have higher rank, but its first dimension must match the size of condition. + * * input2: n-dimensional tensor. + * + * Outputs: + * * output: A tensor, has the same shape as the input_cond. + */ + OH_NN_OPS_WHERE = 88, + + /** + * Converts a sparse representation into a dense tensor. + * + * Inputs: + * * indices: 2-dimensional tensor. Position of an element in a sparse tensor. + * Each element value must be non-negative. The shape is (N, 2). + * * values: 1-dimensional tensor.
The value corresponding to the location of indices. The shape is (N). + * * sparseShape: 2-dimensional tensor. The shape of a sparse tensor. The value consists of + * two positive integers, indicating that the shape of the sparse tensor is (N, C). + * + * Outputs: + * * output: A tensor. The data type is the same as values, and the shape is specified by sparseShape. + */ + OH_NN_OPS_SPARSE_TO_DENSE = 89, + + /** + * Calculates the truth value of input0 or input1 element-wise. + * + * Inputs: + * * input0: Tensor of type boolean or convert to boolean implicitly. + * * input1: Tensor of type boolean or convert to boolean implicitly. + * + * Outputs: + * * output: A tensor of type bool with the shape that input0 and input1 broadcast to. + */ + OH_NN_OPS_LOGICAL_OR = 90, + + /** + * Shift and scale inputs into a distribution centered around 0 with standard deviation 1. + * It accomplishes this by precomputing the mean and variance of the data, and calling + * (x - mean) / sqrt(var) at runtime. + * + * Inputs: + * * input: n-dimensional tensor. + * + * Outputs: + * * output: A tensor, has the same shape and data type as the input. + */ + OH_NN_OPS_NORMALIZE = 91, } OH_NN_OperationType; /** @@ -2357,6 +2542,18 @@ typedef enum { OH_NN_CLIP_MAX = 105, /** This enumerated value is used when the tensor is used as the min parameter of the Clip operator. */ OH_NN_CLIP_MIN = 106, + + /** This enumerated value is used when the tensor is used as the keep_dims parameter of the All operator. */ + OH_NN_ALL_KEEP_DIMS = 107, + + /** This enumerated value is used when the tensor is used as the summarize parameter + * of the Assert operator. */ + OH_NN_ASSERT_SUMMARIZE = 108, + + /** This enumerated value is used when the tensor is used as the scale parameter of the pow operator. */ + OH_NN_POW_SCALE = 109, + /** This enumerated value is used when the tensor is used as the shift parameter of the pow operator. 
*/ + OH_NN_POW_SHIFT = 110, } OH_NN_TensorType; /** diff --git a/test/unittest/BUILD.gn b/test/unittest/BUILD.gn index eed3e38..e4396c5 100644 --- a/test/unittest/BUILD.gn +++ b/test/unittest/BUILD.gn @@ -16,8 +16,8 @@ import("//build/ohos.gni") group("unittest") { testonly = true deps = [ - "components:components_unittest", - "inner_kits:inner_kits_unittest", + #"components:components_unittest", + #"inner_kits:inner_kits_unittest", "ops:ops_unittest", ] } diff --git a/test/unittest/ops/BUILD.gn b/test/unittest/ops/BUILD.gn index 56cc849..c6457a2 100644 --- a/test/unittest/ops/BUILD.gn +++ b/test/unittest/ops/BUILD.gn @@ -33,8 +33,10 @@ ohos_unittest("OpsUnittest") { sources = [ "./abs_test.cpp" ] sources += [ "./add_test.cpp" ] + sources += [ "./all_test.cpp" ] sources += [ "./argmax_test.cpp" ] sources += [ "./avgpool_pad_test.cpp" ] + sources += [ "./assert_test.cpp" ] sources += [ "./avgpool_padmod_test.cpp" ] sources += [ "./batch_to_space_nd_test.cpp" ] sources += [ "./batchnorm_builder_test.cpp" ] @@ -49,6 +51,7 @@ ohos_unittest("OpsUnittest") { sources += [ "./conv2d_padmode_test.cpp" ] sources += [ "./conv2d_tranpose_padmode_test.cpp" ] sources += [ "./conv2d_transpose_pad_test.cpp" ] + sources += [ "./cos_test.cpp" ] sources += [ "./depth_to_space_test.cpp" ] sources += [ "./depthwise_conv2d_native_pad_test.cpp" ] sources += [ "./depthwise_conv2d_native_padmode_test.cpp" ] @@ -72,12 +75,19 @@ ohos_unittest("OpsUnittest") { sources += [ "./leaky_relu_test.cpp" ] sources += [ "./less_test.cpp" ] sources += [ "./lessequal_builder_test.cpp" ] + sources += [ "./log_test.cpp" ] + sources += [ "./logical_and_test.cpp" ] + sources += [ "./logical_not_test.cpp" ] + sources += [ "./logical_or_test.cpp" ] sources += [ "./lstm_test.cpp" ] sources += [ "./maximum_builder_test.cpp" ] sources += [ "./maxpool_pad_test.cpp" ] sources += [ "./maxpool_padmode_test.cpp" ] sources += [ "./matmul_builder_test.cpp" ] + sources += [ "./mod_test.cpp" ] sources += [ 
"./mul_builder_test.cpp" ] + sources += [ "./neg_test.cpp" ] + sources += [ "./normalize_test.cpp" ] sources += [ "./not_equal_builder_test.cpp" ] sources += [ "./onehot_builder_test.cpp" ] sources += [ "./pad_builder_test.cpp" ] @@ -86,6 +96,7 @@ ohos_unittest("OpsUnittest") { sources += [ "./quant_dtype_cast_builder_test.cpp" ] sources += [ "./range_test.cpp" ] sources += [ "./real_div_test.cpp" ] + sources += [ "./reciprocal_test.cpp" ] sources += [ "./reduce_all_builder_test.cpp" ] sources += [ "./reduce_mean_builder_test.cpp" ] sources += [ "./reduce_prod_builder_test.cpp" ] @@ -98,9 +109,11 @@ ohos_unittest("OpsUnittest") { sources += [ "./select_test.cpp" ] sources += [ "./shape_builder_test.cpp" ] sources += [ "./sigmoid_builder_test.cpp" ] + sources += [ "./sin_test.cpp" ] sources += [ "./slice_builder_test.cpp" ] sources += [ "./softmax_builder_test.cpp" ] sources += [ "./spacetobatchnd_builder_test.cpp" ] + sources += [ "./sparse_to_dense_test.cpp" ] sources += [ "./split_builder_test.cpp" ] sources += [ "./sqrt_builder_test.cpp" ] sources += [ "./square_test.cpp" ] @@ -115,6 +128,7 @@ ohos_unittest("OpsUnittest") { sources += [ "./transpose_builder_test.cpp" ] sources += [ "./unsqueeze_builder_test.cpp" ] sources += [ "./unstack_test.cpp" ] + sources += [ "./where_test.cpp" ] sources += [ "./ops_test.cpp" ] sources += [ "../common/base_test.cpp" ] @@ -128,7 +142,7 @@ ohos_unittest("OpsUnittest") { ] external_deps = [ - "drivers_interface_nnrt:libnnrt_proxy_1.0", + "drivers_interface_nnrt:libnnrt_proxy_2.1", "hilog:libhilog", "hitrace:hitrace_meter", "mindspore:mindir", diff --git a/test/unittest/ops/all_test.cpp b/test/unittest/ops/all_test.cpp new file mode 100644 index 0000000..ffa04a4 --- /dev/null +++ b/test/unittest/ops/all_test.cpp @@ -0,0 +1,250 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/all_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class AllBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetInputTensor(); + +protected: + AllBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3}; + std::vector m_dim {2, 2}; + std::vector m_paramDim {}; +}; + +void AllBuilderTest::SetUp() {} + +void AllBuilderTest::TearDown() {} + +void AllBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr keepDimsTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* keepDimsValue = new (std::nothrow) int64_t[1] {0}; + EXPECT_NE(nullptr, keepDimsValue); + keepDimsTensor->SetBuffer(keepDimsValue, sizeof(int64_t)); + m_allTensors.emplace_back(keepDimsTensor); +} + +void AllBuilderTest::SetInputTensor() +{ + m_inputsIndex = m_inputs; + std::shared_ptr inputTensor; + inputTensor = TransToNNTensor(OH_NN_INT32, m_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + + std::shared_ptr axisTensor; + axisTensor = TransToNNTensor(OH_NN_INT32, m_dim, nullptr, OH_NN_TENSOR); + 
m_allTensors.emplace_back(axisTensor); +} + +/** + * @tc.name: all_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(AllBuilderTest, all_build_001, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ALL_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: all_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(AllBuilderTest, all_build_002, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ALL_KEEP_DIMS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: all_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(AllBuilderTest, all_build_003, TestSize.Level2) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + m_params = {4}; + + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ALL_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: all_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
+ * @tc.type: FUNC + */ +HWTEST_F(AllBuilderTest, all_build_004, TestSize.Level2) +{ + m_outputs = {2, 3}; + m_params = {4}; + + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ALL_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: all_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(AllBuilderTest, all_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: all_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(AllBuilderTest, all_build_006, TestSize.Level2) +{ + SetInputTensor(); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: all_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid keep_dims's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(AllBuilderTest, all_build_007, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_ALL_KEEP_DIMS); + float* keepDimsValue = new (std::nothrow) float[1] {0.0f}; + keepDimsTensor->SetBuffer(keepDimsValue, sizeof(float)); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: all_build_008 + * @tc.desc: Verify that the build function returns a failed message with passing invalid keep_dims param. + * @tc.type: FUNC + */ +HWTEST_F(AllBuilderTest, all_build_008, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: all_build_009 + * @tc.desc: Verify that the build function returns a failed message without set buffer for keep_dims. 
+ * @tc.type: FUNC + */ +HWTEST_F(AllBuilderTest, all_build_009, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_ALL_KEEP_DIMS); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: all_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(AllBuilderTest, all_getprimitive_001, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ALL_KEEP_DIMS); + + int64_t keepDimsValue = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_All_GetKeepDims(primitive.get()); + EXPECT_EQ(returnValue, keepDimsValue); +} + +/** + * @tc.name: all_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(AllBuilderTest, all_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/assert_test.cpp b/test/unittest/ops/assert_test.cpp new file mode 100644 index 0000000..98adfe2 --- /dev/null +++ b/test/unittest/ops/assert_test.cpp @@ -0,0 +1,250 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/assert_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class AssertBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetInputTensor(); + +protected: + AssertBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3}; + std::vector m_dim {2, 2}; + std::vector m_paramDim {}; +}; + +void AssertBuilderTest::SetUp() {} + +void AssertBuilderTest::TearDown() {} + +void AssertBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr summarizeTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* summarizeValue = new (std::nothrow) int64_t[1] {0}; + EXPECT_NE(nullptr, summarizeValue); + summarizeTensor->SetBuffer(summarizeValue, sizeof(int64_t)); + m_allTensors.emplace_back(summarizeTensor); +} + +void AssertBuilderTest::SetInputTensor() +{ + m_inputsIndex = m_inputs; + std::shared_ptr inputTensor; + inputTensor = TransToNNTensor(OH_NN_INT32, m_dim, nullptr, OH_NN_TENSOR); 
+ m_allTensors.emplace_back(inputTensor); + + std::shared_ptr conditionTensor; + conditionTensor = TransToNNTensor(OH_NN_INT32, m_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(conditionTensor); +} + +/** + * @tc.name: assert_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(AssertBuilderTest, assert_build_001, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ASSERT_SUMMARIZE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: assert_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(AssertBuilderTest, assert_build_002, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ASSERT_SUMMARIZE); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: assert_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. 
+ * @tc.type: FUNC + */ +HWTEST_F(AssertBuilderTest, assert_build_003, TestSize.Level2) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + m_params = {4}; + + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ASSERT_SUMMARIZE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: assert_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(AssertBuilderTest, assert_build_004, TestSize.Level2) +{ + m_outputs = {2, 3}; + m_params = {4}; + + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ASSERT_SUMMARIZE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: assert_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(AssertBuilderTest, assert_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: assert_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(AssertBuilderTest, assert_build_006, TestSize.Level2) +{ + SetInputTensor(); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: assert_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid summarize's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(AssertBuilderTest, assert_build_007, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr summarizeTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_ASSERT_SUMMARIZE); + float* summarizeValue = new (std::nothrow) float[1] {0.0f}; + summarizeTensor->SetBuffer(summarizeValue, sizeof(float)); + m_allTensors.emplace_back(summarizeTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + summarizeTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: assert_build_008 + * @tc.desc: Verify that the build function returns a failed message with passing invalid summarize param. + * @tc.type: FUNC + */ +HWTEST_F(AssertBuilderTest, assert_build_008, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: assert_build_009 + * @tc.desc: Verify that the build function returns a failed message without set buffer for summarize. 
+ * @tc.type: FUNC + */ +HWTEST_F(AssertBuilderTest, assert_build_009, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr summarizeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_ASSERT_SUMMARIZE); + m_allTensors.emplace_back(summarizeTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: assert_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(AssertBuilderTest, assert_getprimitive_001, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ASSERT_SUMMARIZE); + + int64_t summarizeValue = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_Assert_GetSummarize(primitive.get()); + EXPECT_EQ(returnValue, summarizeValue); +} + +/** + * @tc.name: assert_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(AssertBuilderTest, assert_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/cos_test.cpp b/test/unittest/ops/cos_test.cpp new file mode 100644 index 0000000..3db9623 --- /dev/null +++ b/test/unittest/ops/cos_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/cos_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class CosBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + CosBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void CosBuilderTest::SetUp() {} + +void CosBuilderTest::TearDown() {} + +/** + * @tc.name: cos_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(CosBuilderTest, cos_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: cos_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
+ * @tc.type: FUNC + */ +HWTEST_F(CosBuilderTest, cos_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: cos_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(CosBuilderTest, cos_build_003, TestSize.Level2) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: cos_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(CosBuilderTest, cos_build_004, TestSize.Level2) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: cos_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(CosBuilderTest, cos_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: cos_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(CosBuilderTest, cos_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: cos_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(CosBuilderTest, cos_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: cos_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(CosBuilderTest, cos_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/instance_norm_test.cpp b/test/unittest/ops/instance_norm_test.cpp index 944300f..9f7e838 100644 --- a/test/unittest/ops/instance_norm_test.cpp +++ b/test/unittest/ops/instance_norm_test.cpp @@ -74,6 +74,7 @@ void InstanceNormBuilderTest::SetInputTensor() biasTensor = TransToNNTensor(OH_NN_FLOAT32, m_scaleAndBiasDim, nullptr, OH_NN_TENSOR); m_allTensors.emplace_back(biasTensor); } + /** * @tc.name: instance_norm_build_001 * @tc.desc: Verify that the build function returns a successful message. 
diff --git a/test/unittest/ops/log_test.cpp b/test/unittest/ops/log_test.cpp new file mode 100644 index 0000000..ff9b582 --- /dev/null +++ b/test/unittest/ops/log_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/log_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class LogBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + LogBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void LogBuilderTest::SetUp() {} + +void LogBuilderTest::TearDown() {} + +/** + * @tc.name: log_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(LogBuilderTest, log_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: log_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
+ * @tc.type: FUNC + */ +HWTEST_F(LogBuilderTest, log_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: log_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(LogBuilderTest, log_build_003, TestSize.Level2) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: log_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(LogBuilderTest, log_build_004, TestSize.Level2) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: log_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(LogBuilderTest, log_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: log_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(LogBuilderTest, log_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: log_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(LogBuilderTest, log_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: log_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(LogBuilderTest, log_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/logical_and_test.cpp b/test/unittest/ops/logical_and_test.cpp new file mode 100644 index 0000000..ad4e795 --- /dev/null +++ b/test/unittest/ops/logical_and_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/logical_and_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class LogicalAndBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + LogicalAndBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void LogicalAndBuilderTest::SetUp() {} + +void LogicalAndBuilderTest::TearDown() {} + +/** + * @tc.name: logical_and_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndBuilderTest, logical_and_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: logical_and_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
+ * @tc.type: FUNC + */ +HWTEST_F(LogicalAndBuilderTest, logical_and_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: logical_and_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndBuilderTest, logical_and_build_003, TestSize.Level2) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: logical_and_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndBuilderTest, logical_and_build_004, TestSize.Level2) +{ + m_outputs = {2, 3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: logical_and_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(LogicalAndBuilderTest, logical_and_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: logical_and_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndBuilderTest, logical_and_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: logical_and_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndBuilderTest, logical_and_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: logical_and_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(LogicalAndBuilderTest, logical_and_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/logical_not_test.cpp b/test/unittest/ops/logical_not_test.cpp new file mode 100644 index 0000000..7c461b6 --- /dev/null +++ b/test/unittest/ops/logical_not_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/logical_not_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class LogicalNotBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + LogicalNotBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void LogicalNotBuilderTest::SetUp() {} + +void LogicalNotBuilderTest::TearDown() {} + +/** + * @tc.name: logical_not_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(LogicalNotBuilderTest, logical_not_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: logical_not_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotBuilderTest, logical_not_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: logical_not_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotBuilderTest, logical_not_build_003, TestSize.Level2) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: logical_not_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
+ * @tc.type: FUNC + */ +HWTEST_F(LogicalNotBuilderTest, logical_not_build_004, TestSize.Level2) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: logical_not_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotBuilderTest, logical_not_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: logical_not_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotBuilderTest, logical_not_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: logical_not_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotBuilderTest, logical_not_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: logical_not_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(LogicalNotBuilderTest, logical_not_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/logical_or_test.cpp b/test/unittest/ops/logical_or_test.cpp new file mode 100644 index 0000000..ea0a2f8 --- /dev/null +++ b/test/unittest/ops/logical_or_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/logical_or_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class LogicalOrBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + LogicalOrBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void LogicalOrBuilderTest::SetUp() {} + +void LogicalOrBuilderTest::TearDown() {} + +/** + * @tc.name: logical_or_build_001 + * @tc.desc: Verify that the build function returns a successful message.
+ * @tc.type: FUNC + */ +HWTEST_F(LogicalOrBuilderTest, logical_or_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: logical_or_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrBuilderTest, logical_or_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: logical_or_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrBuilderTest, logical_or_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: logical_or_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
+ * @tc.type: FUNC + */ +HWTEST_F(LogicalOrBuilderTest, logical_or_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: logical_or_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrBuilderTest, logical_or_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: logical_or_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrBuilderTest, logical_or_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: logical_or_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrBuilderTest, logical_or_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: logical_or_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(LogicalOrBuilderTest, logical_or_getprimitive_002, TestSize.Level0) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/mod_test.cpp b/test/unittest/ops/mod_test.cpp new file mode 100644 index 0000000..e927152 --- /dev/null +++ b/test/unittest/ops/mod_test.cpp @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/mod_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ModBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SetInputTensor(); + +protected: + ModBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void ModBuilderTest::SetUp() {} + +void ModBuilderTest::TearDown() {} + +void ModBuilderTest::SetInputTensor() +{ + m_inputsIndex = m_inputs; + std::shared_ptr input1Tensor; + input1Tensor = TransToNNTensor(OH_NN_INT32, m_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(input1Tensor); + + std::shared_ptr input2Tensor; + input2Tensor = TransToNNTensor(OH_NN_INT32, m_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(input2Tensor); +} + +/** + * @tc.name: mod_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ModBuilderTest, mod_build_001, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: mod_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
+ * @tc.type: FUNC + */ +HWTEST_F(ModBuilderTest, mod_build_002, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: mod_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(ModBuilderTest, mod_build_003, TestSize.Level2) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: mod_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(ModBuilderTest, mod_build_004, TestSize.Level2) +{ + m_outputs = {2, 3}; + + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: mod_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(ModBuilderTest, mod_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: mod_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(ModBuilderTest, mod_build_006, TestSize.Level2) +{ + SetInputTensor(); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: mod_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(ModBuilderTest, mod_getprimitive_001, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: mod_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(ModBuilderTest, mod_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/neg_test.cpp b/test/unittest/ops/neg_test.cpp new file mode 100644 index 0000000..8e34f26 --- /dev/null +++ b/test/unittest/ops/neg_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/neg_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class NegBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + NegBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void NegBuilderTest::SetUp() {} + +void NegBuilderTest::TearDown() {} + +/** + * @tc.name: neg_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(NegBuilderTest, neg_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: neg_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(NegBuilderTest, neg_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: neg_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. 
+ * @tc.type: FUNC + */ +HWTEST_F(NegBuilderTest, neg_build_003, TestSize.Level2) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: neg_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(NegBuilderTest, neg_build_004, TestSize.Level2) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: neg_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(NegBuilderTest, neg_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: neg_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(NegBuilderTest, neg_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: neg_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(NegBuilderTest, neg_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: neg_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(NegBuilderTest, neg_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/normalize_test.cpp b/test/unittest/ops/normalize_test.cpp new file mode 100644 index 0000000..90767a2 --- /dev/null +++ b/test/unittest/ops/normalize_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/normalize_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class NormalizeBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + NormalizeBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void NormalizeBuilderTest::SetUp() {} + +void NormalizeBuilderTest::TearDown() {} + +/** + * @tc.name: normalize_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(NormalizeBuilderTest, normalize_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: normalize_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
+ * @tc.type: FUNC + */ +HWTEST_F(NormalizeBuilderTest, normalize_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: normalize_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(NormalizeBuilderTest, normalize_build_003, TestSize.Level2) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: normalize_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(NormalizeBuilderTest, normalize_build_004, TestSize.Level2) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: normalize_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(NormalizeBuilderTest, normalize_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: normalize_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(NormalizeBuilderTest, normalize_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: normalize_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(NormalizeBuilderTest, normalize_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: normalize_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(NormalizeBuilderTest, normalize_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/pow_builder_test.cpp b/test/unittest/ops/pow_builder_test.cpp index e8e3c4f..893fbdf 100644 --- a/test/unittest/ops/pow_builder_test.cpp +++ b/test/unittest/ops/pow_builder_test.cpp @@ -29,17 +29,46 @@ public: void SetUp() override; void TearDown() override; +protected: + void SaveShift(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveScale(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + protected: PowBuilder m_builder; std::vector m_inputs {0, 1}; std::vector m_outputs {2}; + std::vector m_params {3, 4}; std::vector m_dim {1, 2, 2, 1}; + std::vector m_shiftDim {1}; + std::vector m_scaleDim {1}; }; void PowBuilderTest::SetUp() {} void PowBuilderTest::TearDown() {} +void PowBuilderTest::SaveShift(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr shiftTensor = TransToNNTensor(dataType, dim, quantParam, type); + float* shiftValue = new (std::nothrow) float[1] {0.0f}; + EXPECT_NE(nullptr, shiftValue); + shiftTensor->SetBuffer(shiftValue, sizeof(float)); + m_allTensors.emplace_back(shiftTensor); +} + +void PowBuilderTest::SaveScale(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr scaleTensor = TransToNNTensor(dataType, dim, quantParam, type); + float* scaleValue = new (std::nothrow) float[1] {1.0f}; + EXPECT_NE(nullptr, scaleValue); + scaleTensor->SetBuffer(scaleValue, sizeof(float)); + m_allTensors.emplace_back(scaleTensor); +} 
+ /** * @tc.name: pow_build_001 * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function @@ -49,8 +78,10 @@ HWTEST_F(PowBuilderTest, pow_build_001, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveShift(OH_NN_FLOAT32, m_shiftDim, nullptr, OH_NN_POW_SHIFT); + SaveScale(OH_NN_FLOAT32, m_scaleDim, nullptr, OH_NN_POW_SCALE); - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_SUCCESS, ret); } @@ -63,9 +94,11 @@ HWTEST_F(PowBuilderTest, pow_build_002, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveShift(OH_NN_FLOAT32, m_shiftDim, nullptr, OH_NN_POW_SHIFT); + SaveScale(OH_NN_FLOAT32, m_scaleDim, nullptr, OH_NN_POW_SCALE); - EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); } @@ -78,11 +111,14 @@ HWTEST_F(PowBuilderTest, pow_build_003, TestSize.Level0) { m_inputs = {0, 1, 2}; m_outputs = {3}; + m_params = {4, 5}; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveShift(OH_NN_FLOAT32, m_shiftDim, nullptr, OH_NN_POW_SHIFT); + SaveScale(OH_NN_FLOAT32, m_scaleDim, nullptr, OH_NN_POW_SCALE); - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + OH_NN_ReturnCode ret = 
m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } @@ -94,11 +130,14 @@ HWTEST_F(PowBuilderTest, pow_build_003, TestSize.Level0) HWTEST_F(PowBuilderTest, pow_build_004, TestSize.Level0) { m_outputs = {2, 3}; + m_params = {4, 5}; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveShift(OH_NN_FLOAT32, m_shiftDim, nullptr, OH_NN_POW_SHIFT); + SaveScale(OH_NN_FLOAT32, m_scaleDim, nullptr, OH_NN_POW_SCALE); - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } @@ -109,7 +148,7 @@ HWTEST_F(PowBuilderTest, pow_build_004, TestSize.Level0) */ HWTEST_F(PowBuilderTest, pow_build_005, TestSize.Level0) { - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } @@ -122,26 +161,117 @@ HWTEST_F(PowBuilderTest, pow_build_006, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } /** * @tc.name: pow_build_007 - * @tc.desc: Verify that the build function return a failed message with a virtual parameter + * @tc.desc: Verify that the build function returns a failed message with invalid shift's dataType. 
* @tc.type: FUNC */ -HWTEST_F(PowBuilderTest, pow_build_007, TestSize.Level0) +HWTEST_F(PowBuilderTest, pow_build_007, TestSize.Level2) { - std::vector paramsIndex = {3}; - std::vector paramDim = {}; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + std::shared_ptr shiftTensor = TransToNNTensor(OH_NN_INT64, m_shiftDim, + nullptr, OH_NN_POW_SHIFT); + int64_t* shiftValue = new (std::nothrow) int64_t[1] {0}; + shiftTensor->SetBuffer(shiftValue, sizeof(shiftValue)); + m_allTensors.emplace_back(shiftTensor); + SaveScale(OH_NN_FLOAT32, m_scaleDim, nullptr, OH_NN_POW_SCALE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + shiftTensor->SetBuffer(nullptr, 0); +} +/** + * @tc.name: pow_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid scale's dataType. + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_008, TestSize.Level2) +{ SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); - std::shared_ptr powTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); - m_allTensors.emplace_back(powTensor); + SaveShift(OH_NN_FLOAT32, m_shiftDim, nullptr, OH_NN_POW_SHIFT); + std::shared_ptr scaleTensor = TransToNNTensor(OH_NN_INT64, m_scaleDim, + nullptr, OH_NN_POW_SCALE); + int64_t* scaleValue = new (std::nothrow) int64_t[1] {1}; + scaleTensor->SetBuffer(scaleValue, sizeof(scaleValue)); + m_allTensors.emplace_back(scaleTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + scaleTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: pow_build_009 + * @tc.desc: Verify that the build function returns a failed message with passing invalid shift param. 
+ * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_009, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveShift(OH_NN_FLOAT32, m_shiftDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SaveScale(OH_NN_FLOAT32, m_scaleDim, nullptr, OH_NN_POW_SCALE); - OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pow_build_010 + * @tc.desc: Verify that the build function returns a failed message with passing invalid scale param. + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_010, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveShift(OH_NN_FLOAT32, m_shiftDim, nullptr, OH_NN_POW_SHIFT); + SaveScale(OH_NN_FLOAT32, m_scaleDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pow_build_011 + * @tc.desc: Verify that the build function returns a failed message without set buffer for shift.
+ * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_011, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr shiftTensor = TransToNNTensor(OH_NN_FLOAT32, m_shiftDim, + nullptr, OH_NN_POW_SHIFT); + m_allTensors.emplace_back(shiftTensor); + SaveScale(OH_NN_FLOAT32, m_scaleDim, nullptr, OH_NN_POW_SCALE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pow_build_012 + * @tc.desc: Verify that the build function returns a failed message without set buffer for scale. + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_012, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveShift(OH_NN_FLOAT32, m_shiftDim, nullptr, OH_NN_POW_SHIFT); + std::shared_ptr scaleTensor = TransToNNTensor(OH_NN_FLOAT32, m_scaleDim, + nullptr, OH_NN_POW_SCALE); + m_allTensors.emplace_back(scaleTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } @@ -166,11 +296,20 @@ HWTEST_F(PowBuilderTest, pow_get_primitive_002, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveShift(OH_NN_FLOAT32, m_shiftDim, nullptr, OH_NN_POW_SHIFT); + SaveScale(OH_NN_FLOAT32, m_scaleDim, nullptr, OH_NN_POW_SCALE); - EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + float shiftValue = 0.0f; + float scaleValue = 1.0f; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); LiteGraphTensorPtr powPrimitive = m_builder.GetPrimitive(); LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
EXPECT_NE(powPrimitive, expectPrimitive); + + auto returnShiftValue = mindspore::lite::MindIR_PowFusion_GetShift(powPrimitive.get()); + EXPECT_EQ(shiftValue, returnShiftValue); + auto returnScaleValue = mindspore::lite::MindIR_PowFusion_GetScale(powPrimitive.get()); + EXPECT_EQ(scaleValue, returnScaleValue); } } // namespace UnitTest } // namespace NeuralNetworkRuntime diff --git a/test/unittest/ops/reciprocal_test.cpp b/test/unittest/ops/reciprocal_test.cpp new file mode 100644 index 0000000..421284d --- /dev/null +++ b/test/unittest/ops/reciprocal_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/reciprocal_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ReciprocalBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + ReciprocalBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void ReciprocalBuilderTest::SetUp() {} + +void ReciprocalBuilderTest::TearDown() {} + +/** + * @tc.name: reciprocal_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(ReciprocalBuilderTest, reciprocal_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: reciprocal_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalBuilderTest, reciprocal_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: reciprocal_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalid input. + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalBuilderTest, reciprocal_build_003, TestSize.Level2) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reciprocal_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalid output.
+ * @tc.type: FUNC + */ +HWTEST_F(ReciprocalBuilderTest, reciprocal_build_004, TestSize.Level2) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reciprocal_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalBuilderTest, reciprocal_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reciprocal_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalBuilderTest, reciprocal_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reciprocal_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalBuilderTest, reciprocal_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: reciprocal_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(ReciprocalBuilderTest, reciprocal_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/sin_test.cpp b/test/unittest/ops/sin_test.cpp new file mode 100644 index 0000000..3d50a70 --- /dev/null +++ b/test/unittest/ops/sin_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/sin_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SinBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + SinBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void SinBuilderTest::SetUp() {} + +void SinBuilderTest::TearDown() {} + +/** + * @tc.name: sin_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(SinBuilderTest, sin_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: sin_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(SinBuilderTest, sin_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: sin_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalid input. + * @tc.type: FUNC + */ +HWTEST_F(SinBuilderTest, sin_build_003, TestSize.Level2) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sin_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalid output.
+ * @tc.type: FUNC + */ +HWTEST_F(SinBuilderTest, sin_build_004, TestSize.Level2) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sin_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(SinBuilderTest, sin_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sin_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(SinBuilderTest, sin_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sin_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(SinBuilderTest, sin_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: sin_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(SinBuilderTest, sin_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/sparse_to_dense_test.cpp b/test/unittest/ops/sparse_to_dense_test.cpp new file mode 100644 index 0000000..add8e5c --- /dev/null +++ b/test/unittest/ops/sparse_to_dense_test.cpp @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/sparse_to_dense_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SparseToDenseBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SetInputTensor(); + void SetOutputTensor(); + +protected: + SparseToDenseBuilder m_builder; + std::vector m_inputs {0, 1, 2}; + std::vector m_outputs {3}; + std::vector m_params {}; + std::vector m_indicesDim {2, 2}; + std::vector m_valueDim {2}; + std::vector m_sparseShapeDim {2}; + std::vector m_outputDim {2, 3}; +}; + +void SparseToDenseBuilderTest::SetUp() {} + +void SparseToDenseBuilderTest::TearDown() {} + +void SparseToDenseBuilderTest::SetInputTensor() +{ + m_inputsIndex = m_inputs; + std::shared_ptr indicesTensor; + indicesTensor = TransToNNTensor(OH_NN_FLOAT32, m_indicesDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(indicesTensor); + + std::shared_ptr valueTensor; + valueTensor = TransToNNTensor(OH_NN_FLOAT32, m_valueDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(valueTensor); + + std::shared_ptr sparseShapeTensor; + sparseShapeTensor = TransToNNTensor(OH_NN_FLOAT32, m_sparseShapeDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(sparseShapeTensor); +} + +/** + * @tc.name: SparseToDense_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_001, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: SparseToDense_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
+ * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_002, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: SparseToDense_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalid input. + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_003, TestSize.Level2) +{ + m_inputs = {0, 1, 2, 3}; + m_outputs = {4}; + + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: SparseToDense_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalid output. + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_004, TestSize.Level2) +{ + m_outputs = {3, 4}; + + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: SparseToDense_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: SparseToDense_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor.
+ * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_006, TestSize.Level2) +{ + SetInputTensor(); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: SparseToDense_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseBuilderTest, SparseToDense_getprimitive_001, TestSize.Level2) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: SparseToDense_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseBuilderTest, SparseToDense_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/unstack_test.cpp b/test/unittest/ops/unstack_test.cpp index e9d4cff..6675e57 100644 --- a/test/unittest/ops/unstack_test.cpp +++ b/test/unittest/ops/unstack_test.cpp @@ -108,39 +108,21 @@ HWTEST_F(UnstackBuilderTest, unstack_build_003, TestSize.Level2) /** * @tc.name: unstack_build_004 - * @tc.desc: Verify that the build function returns a failed message with invalided output. 
- * @tc.type: FUNC - */ -HWTEST_F(UnstackBuilderTest, unstack_build_004, TestSize.Level2) -{ - m_outputs = {1, 2}; - m_params = {3}; - - SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); - SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_UNSTACK_AXIS); - - OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); - EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); -} - -/** - * @tc.name: unstack_build_005 * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(UnstackBuilderTest, unstack_build_005, TestSize.Level2) +HWTEST_F(UnstackBuilderTest, unstack_build_004, TestSize.Level2) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } /** - * @tc.name: unstack_build_006 + * @tc.name: unstack_build_005 * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(UnstackBuilderTest, unstack_build_006, TestSize.Level2) +HWTEST_F(UnstackBuilderTest, unstack_build_005, TestSize.Level2) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -149,11 +131,11 @@ HWTEST_F(UnstackBuilderTest, unstack_build_006, TestSize.Level2) } /** - * @tc.name: unstack_build_007 + * @tc.name: unstack_build_006 * @tc.desc: Verify that the build function returns a failed message with invalid axis's dataType. 
* @tc.type: FUNC */ -HWTEST_F(UnstackBuilderTest, unstack_build_007, TestSize.Level2) +HWTEST_F(UnstackBuilderTest, unstack_build_006, TestSize.Level2) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -169,11 +151,11 @@ HWTEST_F(UnstackBuilderTest, unstack_build_007, TestSize.Level2) } /** - * @tc.name: unstack_build_008 + * @tc.name: unstack_build_007 * @tc.desc: Verify that the build function returns a failed message with passing invalid axis param. * @tc.type: FUNC */ -HWTEST_F(UnstackBuilderTest, unstack_build_008, TestSize.Level2) +HWTEST_F(UnstackBuilderTest, unstack_build_007, TestSize.Level2) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -184,11 +166,11 @@ HWTEST_F(UnstackBuilderTest, unstack_build_008, TestSize.Level2) } /** - * @tc.name: unstack_build_009 + * @tc.name: unstack_build_008 * @tc.desc: Verify that the build function returns a failed message without set buffer for axis. * @tc.type: FUNC */ -HWTEST_F(UnstackBuilderTest, unstack_build_009, TestSize.Level2) +HWTEST_F(UnstackBuilderTest, unstack_build_008, TestSize.Level2) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); diff --git a/test/unittest/ops/where_test.cpp b/test/unittest/ops/where_test.cpp new file mode 100644 index 0000000..a3a1860 --- /dev/null +++ b/test/unittest/ops/where_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/where_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class WhereBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + WhereBuilder m_builder; + std::vector m_inputs {0, 1, 2}; + std::vector m_outputs {3}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void WhereBuilderTest::SetUp() {} + +void WhereBuilderTest::TearDown() {} + +/** + * @tc.name: where_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(WhereBuilderTest, where_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: where_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
+ * @tc.type: FUNC + */ +HWTEST_F(WhereBuilderTest, where_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: where_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalid input. + * @tc.type: FUNC + */ +HWTEST_F(WhereBuilderTest, where_build_003, TestSize.Level2) +{ + m_inputs = {0, 1, 2, 3}; + m_outputs = {4}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: where_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalid output. + * @tc.type: FUNC + */ +HWTEST_F(WhereBuilderTest, where_build_004, TestSize.Level2) +{ + m_outputs = {3, 4}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: where_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(WhereBuilderTest, where_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: where_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor.
+ * @tc.type: FUNC + */ +HWTEST_F(WhereBuilderTest, where_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: where_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(WhereBuilderTest, where_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: where_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(WhereBuilderTest, where_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file -- Gitee From ba4dc4dadbbe814a974f4b4525e72f5dd3e39ee8 Mon Sep 17 00:00:00 2001 From: wWX1227061 Date: Fri, 23 Feb 2024 16:13:42 +0800 Subject: [PATCH 2/9] fix codecheck bug Signed-off-by: wWX1227061 --- .../neural_network_runtime/ops/sparse_to_dense_builder.cpp | 3 ++- .../c/neural_network_runtime/neural_network_runtime_type.h | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.cpp b/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.cpp index 63c0389..43e28b9 100644 --- a/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.cpp @@ -43,7 +43,8 @@ OH_NN_ReturnCode SparseToDenseBuilder::Build(const std::vector& params } if (!paramsIndex.empty()) { - LOGW("[SparseToDense] Build failed, the sparseToDense expects no parameters, but receive %zu", paramsIndex.size()); + LOGW("[SparseToDense] Build failed, the sparseToDense expects no parameters, + but receive %zu", paramsIndex.size()); return OH_NN_INVALID_PARAMETER; } diff --git a/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h b/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h index d29a804..6cb2f0a 100644 --- a/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h +++ b/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h @@ -2140,7 +2140,7 @@ typedef enum { * The condition tensor, decides which element is chosen. * * input1: n-dimensional tensor. 
If condition is rank 1, * x1 may have higher rank, but its first dimension must match the size of condition. - * * input2: n-dimensional tensor. + * * input2: n-dimensional tensor. * * Outputs: * * output: A tensor, has the same shape as the input_cond. -- Gitee From 9361a4a37d1207431887d4bb6fc010136efb91ef Mon Sep 17 00:00:00 2001 From: wWX1227061 Date: Fri, 23 Feb 2024 16:30:21 +0800 Subject: [PATCH 3/9] fix codecheck bug Signed-off-by: wWX1227061 --- .../neural_network_runtime/ops/sparse_to_dense_builder.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.cpp b/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.cpp index 43e28b9..52b5f79 100644 --- a/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.cpp @@ -43,8 +43,8 @@ OH_NN_ReturnCode SparseToDenseBuilder::Build(const std::vector& params } if (!paramsIndex.empty()) { - LOGW("[SparseToDense] Build failed, the sparseToDense expects no parameters, - but receive %zu", paramsIndex.size()); + LOGW("[SparseToDense] Build failed, the sparseToDense expects no parameters, but receive %zu", \ + paramsIndex.size()); return OH_NN_INVALID_PARAMETER; } -- Gitee From 39361432b9ca27448e36ee6d3641928cd30619be Mon Sep 17 00:00:00 2001 From: wWX1227061 Date: Fri, 23 Feb 2024 17:06:38 +0800 Subject: [PATCH 4/9] delete normalize ops Signed-off-by: wWX1227061 --- .../native/neural_network_runtime/BUILD.gn | 1 - .../lite_graph_to_hdi_model_v2_1.cpp | 19 ----- .../ops/normalize_builder.cpp | 73 ------------------- .../ops/normalize_builder.h | 42 ----------- .../neural_network_runtime_type.h | 13 ---- test/unittest/ops/BUILD.gn | 1 - 6 files changed, 149 deletions(-) delete mode 100644 frameworks/native/neural_network_runtime/ops/normalize_builder.cpp delete mode 100644 
frameworks/native/neural_network_runtime/ops/normalize_builder.h diff --git a/frameworks/native/neural_network_runtime/BUILD.gn b/frameworks/native/neural_network_runtime/BUILD.gn index 36c71d6..af8948b 100644 --- a/frameworks/native/neural_network_runtime/BUILD.gn +++ b/frameworks/native/neural_network_runtime/BUILD.gn @@ -100,7 +100,6 @@ ops_sources = [ "ops/mod_builder.cpp", "ops/mul_builder.cpp", "ops/neg_builder.cpp", - "ops/normalize_builder.cpp", "ops/notequal_builder.cpp", "ops/onehot_builder.cpp", "ops/ops_validation.cpp", diff --git a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp index f9ae54c..efc04c0 100644 --- a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp +++ b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp @@ -850,22 +850,6 @@ std::vector ConvertNeg(PrimitivePtr primitive) return ret; } -std::vector ConvertCustomNormalize(PrimitivePtr primitive) -{ - if (primitive == nullptr) { - LOGE("ConvertCustomNormalize v2_1 failed, primitive is nullptr."); - return {}; - } - - CustomNormalize customNormalize{}; - - OHOS::MessageParcel data; - (void)CustomNormalizeBlockMarshalling(data, customNormalize); - std::vector ret(reinterpret_cast(data.GetData()), - reinterpret_cast(data.GetData()) + data.GetDataSize()); - return ret; -} - std::vector ConvertNotEqual(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -1607,9 +1591,6 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_NEG: return ConvertNeg(primitive); break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CUSTOM_NORMALIZE: - return ConvertCustomNormalize(primitive); - break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_NOT_EQUAL: return ConvertNotEqual(primitive); break; diff --git a/frameworks/native/neural_network_runtime/ops/normalize_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/normalize_builder.cpp deleted file mode 100644 index 4476c27..0000000 --- a/frameworks/native/neural_network_runtime/ops/normalize_builder.cpp +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2024 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "normalize_builder.h" - -namespace OHOS { -namespace NeuralNetworkRuntime { -namespace Ops { -static const int INPUT_NUM = 1; -static const int OUTPUT_NUM = 1; -static const std::string OP_NAME = "Normalize"; - -NormalizeBuilder::NormalizeBuilder() {} - -NormalizeBuilder::~NormalizeBuilder() {} - -OH_NN_ReturnCode NormalizeBuilder::Build(const std::vector& paramsIndex, - const std::vector& inputsIndex, - const std::vector& outputsIndex, - const std::vector>& allTensors) -{ - if (m_isBuild) { - LOGE("[Normalize] Build failed, the normalize operation has been build. 
cannot build again."); - return OH_NN_OPERATION_FORBIDDEN; - } - - auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); - if (ret != OH_NN_SUCCESS) { - LOGE("[Normalize] Build failed, passed invalid input or output index."); - return ret; - } - - if (!paramsIndex.empty()) { - LOGW("[Normalize] Build failed, the normalize expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } - - m_inputsIndex = inputsIndex; - m_outputsIndex = outputsIndex; - - m_name = OP_NAME; - m_isBuild = true; - return OH_NN_SUCCESS; -} - -LiteGraphPrimitvePtr NormalizeBuilder::GetPrimitive() -{ - if (!m_isBuild) { - LOGE("[Normalize] GetPrimitive failed, cannot get primitive before call build."); - return {nullptr, DestroyLiteGraphPrimitive}; - } - - void* primitive = mindspore::lite::MindIR_CustomNormalize_CreatePrimitive(); - LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; - return graphPrimitivePtr; -} - -REGISTER_OPS(NormalizeBuilder, OH_NN_OPS_NORMALIZE); -} // namespace Ops -} // namespace NeuralNetworkRuntime -} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/normalize_builder.h b/frameworks/native/neural_network_runtime/ops/normalize_builder.h deleted file mode 100644 index 53d467c..0000000 --- a/frameworks/native/neural_network_runtime/ops/normalize_builder.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2024 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef NEURAL_NETWORK_RUNTIME_NORMALIZE_BUILDER_H -#define NEURAL_NETWORK_RUNTIME_NORMALIZE_BUILDER_H - -#include "mindir.h" - -#include "ops_builder.h" -#include "ops_registry.h" - -namespace OHOS { -namespace NeuralNetworkRuntime { -namespace Ops { -class NormalizeBuilder : public OpsBuilder { -public: - NormalizeBuilder(); - ~NormalizeBuilder() override; - OH_NN_ReturnCode Build(const std::vector& paramsIndex, - const std::vector& inputsIndex, - const std::vector& outputsIndex, - const std::vector>& allTensors) override; - - LiteGraphPrimitvePtr GetPrimitive() override; -}; -} // namespace Ops -} // namespace NeuralNetworkRuntime -} // namespace OHOS - -#endif // NEURAL_NETWORK_RUNTIME_NORMALIZE_BUILDER_H \ No newline at end of file diff --git a/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h b/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h index 6cb2f0a..fa77804 100644 --- a/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h +++ b/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h @@ -2173,19 +2173,6 @@ typedef enum { * * output: A tensor of type bool with the shape that input0 and input1 broadcast to. */ OH_NN_OPS_LOGICAL_OR = 90, - - /** - * Shift and scale inputs into a distribution centered around 0 with standard deviation 1. - * It accomplishes this by precomputing the mean and variance of the data, and calling - * (x - mean) / sqrt(var) at runtime. - * - * Inputs: - * * input: n-dimensional tensor. - * - * Outputs: - * * output: A tensor, has the same shape and data type as the input. 
- */ - OH_NN_OPS_NORMALIZE = 91, } OH_NN_OperationType; /** diff --git a/test/unittest/ops/BUILD.gn b/test/unittest/ops/BUILD.gn index c6457a2..4a4fa2c 100644 --- a/test/unittest/ops/BUILD.gn +++ b/test/unittest/ops/BUILD.gn @@ -87,7 +87,6 @@ ohos_unittest("OpsUnittest") { sources += [ "./mod_test.cpp" ] sources += [ "./mul_builder_test.cpp" ] sources += [ "./neg_test.cpp" ] - sources += [ "./normalize_test.cpp" ] sources += [ "./not_equal_builder_test.cpp" ] sources += [ "./onehot_builder_test.cpp" ] sources += [ "./pad_builder_test.cpp" ] -- Gitee From aad01799d89b7842ec5bc9223d27c056b6a7e62c Mon Sep 17 00:00:00 2001 From: wWX1227061 Date: Fri, 23 Feb 2024 17:09:57 +0800 Subject: [PATCH 5/9] delete normalize ops Signed-off-by: wWX1227061 --- test/unittest/ops/normalize_test.cpp | 158 --------------------------- 1 file changed, 158 deletions(-) delete mode 100644 test/unittest/ops/normalize_test.cpp diff --git a/test/unittest/ops/normalize_test.cpp b/test/unittest/ops/normalize_test.cpp deleted file mode 100644 index 90767a2..0000000 --- a/test/unittest/ops/normalize_test.cpp +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright (c) 2024 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "ops/normalize_builder.h" - -#include "ops_test.h" - -using namespace testing; -using namespace testing::ext; -using namespace OHOS::NeuralNetworkRuntime::Ops; - -namespace OHOS { -namespace NeuralNetworkRuntime { -namespace UnitTest { -class NormalizeBuilderTest : public OpsTest { -public: - void SetUp() override; - void TearDown() override; - -protected: - NormalizeBuilder m_builder; - std::vector m_inputs {0}; - std::vector m_outputs {1}; - std::vector m_dim {1, 2, 2, 1}; -}; - -void NormalizeBuilderTest::SetUp() {} - -void NormalizeBuilderTest::TearDown() {} - -/** - * @tc.name: normalize_build_001 - * @tc.desc: Verify that the build function returns a successful message. - * @tc.type: FUNC - */ -HWTEST_F(NormalizeBuilderTest, normalize_build_001, TestSize.Level2) -{ - SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); - SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); - EXPECT_EQ(OH_NN_SUCCESS, ret); -} - -/** - * @tc.name: normalize_build_002 - * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. - * @tc.type: FUNC - */ -HWTEST_F(NormalizeBuilderTest, normalize_build_002, TestSize.Level2) -{ - SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); - SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - - EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); - EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); -} - -/** - * @tc.name: normalize_build_003 - * @tc.desc: Verify that the build function returns a failed message with invalided input. 
- * @tc.type: FUNC - */ -HWTEST_F(NormalizeBuilderTest, normalize_build_003, TestSize.Level2) -{ - m_inputs = {0, 1}; - m_outputs = {2}; - - SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); - SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); - EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); -} - -/** - * @tc.name: normalize_build_004 - * @tc.desc: Verify that the build function returns a failed message with invalided output. - * @tc.type: FUNC - */ -HWTEST_F(NormalizeBuilderTest, normalize_build_004, TestSize.Level2) -{ - m_outputs = {1, 2}; - - SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); - SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); - EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); -} - -/** - * @tc.name: normalize_build_005 - * @tc.desc: Verify that the build function returns a failed message with empty allTensor. - * @tc.type: FUNC - */ -HWTEST_F(NormalizeBuilderTest, normalize_build_005, TestSize.Level2) -{ - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); - EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); -} - -/** - * @tc.name: normalize_build_006 - * @tc.desc: Verify that the build function returns a failed message without output tensor. 
- * @tc.type: FUNC - */ -HWTEST_F(NormalizeBuilderTest, normalize_build_006, TestSize.Level2) -{ - SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); - - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); - EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); -} - -/** - * @tc.name: normalize_getprimitive_001 - * @tc.desc: Verify that the getPrimitive function returns a successful message - * @tc.type: FUNC - */ -HWTEST_F(NormalizeBuilderTest, normalize_getprimitive_001, TestSize.Level2) -{ - SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); - SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - - EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); - LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); - LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); - EXPECT_NE(expectPrimitive, primitive); -} - -/** - * @tc.name: normalize_getprimitive_002 - * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
- * @tc.type: FUNC - */ -HWTEST_F(NormalizeBuilderTest, normalize_getprimitive_002, TestSize.Level2) -{ - LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); - LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); - EXPECT_EQ(expectPrimitive, primitive); -} -} -} -} \ No newline at end of file -- Gitee From 40481edc72133f75c0b873c98f17ffbcd8c9e5c9 Mon Sep 17 00:00:00 2001 From: wWX1227061 Date: Mon, 26 Feb 2024 10:47:13 +0800 Subject: [PATCH 6/9] fix gtest level Signed-off-by: wWX1227061 --- test/unittest/BUILD.gn | 4 +- test/unittest/ops/abs_test.cpp | 16 +-- test/unittest/ops/all_test.cpp | 24 ++-- test/unittest/ops/assert_test.cpp | 24 ++-- test/unittest/ops/broadcast_to_test.cpp | 24 ++-- test/unittest/ops/clip_test.cpp | 31 ++--- test/unittest/ops/constant_of_shape_test.cpp | 32 ++--- test/unittest/ops/cos_test.cpp | 16 +-- test/unittest/ops/depth_to_space_test.cpp | 40 +++--- test/unittest/ops/equal_builder_test.cpp | 18 +-- test/unittest/ops/erf_test.cpp | 16 +-- test/unittest/ops/exp_test.cpp | 36 +++--- test/unittest/ops/flatten_test.cpp | 24 ++-- test/unittest/ops/greater_builder_test.cpp | 18 +-- .../ops/greater_equal_builder_test.cpp | 18 +-- test/unittest/ops/instance_norm_test.cpp | 24 ++-- test/unittest/ops/leaky_relu_test.cpp | 27 +++-- test/unittest/ops/less_test.cpp | 16 +-- test/unittest/ops/lessequal_builder_test.cpp | 18 +-- test/unittest/ops/log_test.cpp | 16 +-- test/unittest/ops/logical_and_test.cpp | 16 +-- test/unittest/ops/logical_not_test.cpp | 16 +-- test/unittest/ops/logical_or_test.cpp | 16 +-- test/unittest/ops/lstm_test.cpp | 114 ++++++++++++------ test/unittest/ops/mod_test.cpp | 16 +-- test/unittest/ops/neg_test.cpp | 16 +-- test/unittest/ops/not_equal_builder_test.cpp | 18 +-- test/unittest/ops/pow_builder_test.cpp | 33 ++--- test/unittest/ops/range_test.cpp | 43 +++---- test/unittest/ops/real_div_test.cpp | 16 +-- test/unittest/ops/reciprocal_test.cpp | 16 +-- test/unittest/ops/select_test.cpp | 16 +-- 
test/unittest/ops/sin_test.cpp | 16 +-- test/unittest/ops/sparse_to_dense_test.cpp | 16 +-- test/unittest/ops/square_test.cpp | 16 +-- test/unittest/ops/unstack_test.cpp | 22 ++-- test/unittest/ops/where_test.cpp | 16 +-- 37 files changed, 465 insertions(+), 399 deletions(-) diff --git a/test/unittest/BUILD.gn b/test/unittest/BUILD.gn index e4396c5..eed3e38 100644 --- a/test/unittest/BUILD.gn +++ b/test/unittest/BUILD.gn @@ -16,8 +16,8 @@ import("//build/ohos.gni") group("unittest") { testonly = true deps = [ - #"components:components_unittest", - #"inner_kits:inner_kits_unittest", + "components:components_unittest", + "inner_kits:inner_kits_unittest", "ops:ops_unittest", ] } diff --git a/test/unittest/ops/abs_test.cpp b/test/unittest/ops/abs_test.cpp index 7b9144f..74d66a5 100644 --- a/test/unittest/ops/abs_test.cpp +++ b/test/unittest/ops/abs_test.cpp @@ -45,7 +45,7 @@ void AbsBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(AbsBuilderTest, abs_build_001, TestSize.Level2) +HWTEST_F(AbsBuilderTest, abs_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -59,7 +59,7 @@ HWTEST_F(AbsBuilderTest, abs_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(AbsBuilderTest, abs_build_002, TestSize.Level2) +HWTEST_F(AbsBuilderTest, abs_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -74,7 +74,7 @@ HWTEST_F(AbsBuilderTest, abs_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. 
* @tc.type: FUNC */ -HWTEST_F(AbsBuilderTest, abs_build_003, TestSize.Level2) +HWTEST_F(AbsBuilderTest, abs_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -91,7 +91,7 @@ HWTEST_F(AbsBuilderTest, abs_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. * @tc.type: FUNC */ -HWTEST_F(AbsBuilderTest, abs_build_004, TestSize.Level2) +HWTEST_F(AbsBuilderTest, abs_build_004, TestSize.Level1) { m_outputs = {1, 2}; @@ -107,7 +107,7 @@ HWTEST_F(AbsBuilderTest, abs_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(AbsBuilderTest, abs_build_005, TestSize.Level2) +HWTEST_F(AbsBuilderTest, abs_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -118,7 +118,7 @@ HWTEST_F(AbsBuilderTest, abs_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(AbsBuilderTest, abs_build_006, TestSize.Level2) +HWTEST_F(AbsBuilderTest, abs_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -131,7 +131,7 @@ HWTEST_F(AbsBuilderTest, abs_build_006, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(AbsBuilderTest, abs_getprimitive_001, TestSize.Level2) +HWTEST_F(AbsBuilderTest, abs_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -147,7 +147,7 @@ HWTEST_F(AbsBuilderTest, abs_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(AbsBuilderTest, abs_getprimitive_002, TestSize.Level2) +HWTEST_F(AbsBuilderTest, abs_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/all_test.cpp b/test/unittest/ops/all_test.cpp index ffa04a4..933e2e6 100644 --- a/test/unittest/ops/all_test.cpp +++ b/test/unittest/ops/all_test.cpp @@ -74,7 +74,7 @@ void AllBuilderTest::SetInputTensor() * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(AllBuilderTest, all_build_001, TestSize.Level2) +HWTEST_F(AllBuilderTest, all_build_001, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -89,7 +89,7 @@ HWTEST_F(AllBuilderTest, all_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(AllBuilderTest, all_build_002, TestSize.Level2) +HWTEST_F(AllBuilderTest, all_build_002, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -105,7 +105,7 @@ HWTEST_F(AllBuilderTest, all_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(AllBuilderTest, all_build_003, TestSize.Level2) +HWTEST_F(AllBuilderTest, all_build_003, TestSize.Level1) { m_inputs = {0, 1, 2}; m_outputs = {3}; @@ -124,7 +124,7 @@ HWTEST_F(AllBuilderTest, all_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(AllBuilderTest, all_build_004, TestSize.Level2) +HWTEST_F(AllBuilderTest, all_build_004, TestSize.Level1) { m_outputs = {2, 3}; m_params = {4}; @@ -142,7 +142,7 @@ HWTEST_F(AllBuilderTest, all_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(AllBuilderTest, all_build_005, TestSize.Level2) +HWTEST_F(AllBuilderTest, all_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -153,7 +153,7 @@ HWTEST_F(AllBuilderTest, all_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(AllBuilderTest, all_build_006, TestSize.Level2) +HWTEST_F(AllBuilderTest, all_build_006, TestSize.Level1) { SetInputTensor(); @@ -166,10 +166,11 @@ HWTEST_F(AllBuilderTest, all_build_006, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid keep_dims's dataType. * @tc.type: FUNC */ -HWTEST_F(AllBuilderTest, all_build_007, TestSize.Level2) +HWTEST_F(AllBuilderTest, all_build_007, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_ALL_KEEP_DIMS); float* keepDimsValue = new (std::nothrow) float[1] {0.0f}; @@ -186,7 +187,7 @@ HWTEST_F(AllBuilderTest, all_build_007, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid keep_dims param. 
* @tc.type: FUNC */ -HWTEST_F(AllBuilderTest, all_build_008, TestSize.Level2) +HWTEST_F(AllBuilderTest, all_build_008, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -201,10 +202,11 @@ HWTEST_F(AllBuilderTest, all_build_008, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for keep_dims. * @tc.type: FUNC */ -HWTEST_F(AllBuilderTest, all_build_009, TestSize.Level2) +HWTEST_F(AllBuilderTest, all_build_009, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ALL_KEEP_DIMS); m_allTensors.emplace_back(keepDimsTensor); @@ -218,7 +220,7 @@ HWTEST_F(AllBuilderTest, all_build_009, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(AllBuilderTest, all_getprimitive_001, TestSize.Level2) +HWTEST_F(AllBuilderTest, all_getprimitive_001, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -239,7 +241,7 @@ HWTEST_F(AllBuilderTest, all_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. * @tc.type: FUNC */ -HWTEST_F(AllBuilderTest, all_getprimitive_002, TestSize.Level2) +HWTEST_F(AllBuilderTest, all_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/assert_test.cpp b/test/unittest/ops/assert_test.cpp index 98adfe2..132b091 100644 --- a/test/unittest/ops/assert_test.cpp +++ b/test/unittest/ops/assert_test.cpp @@ -74,7 +74,7 @@ void AssertBuilderTest::SetInputTensor() * @tc.desc: Verify that the build function returns a successful message. 
* @tc.type: FUNC */ -HWTEST_F(AssertBuilderTest, assert_build_001, TestSize.Level2) +HWTEST_F(AssertBuilderTest, assert_build_001, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -89,7 +89,7 @@ HWTEST_F(AssertBuilderTest, assert_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(AssertBuilderTest, assert_build_002, TestSize.Level2) +HWTEST_F(AssertBuilderTest, assert_build_002, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -105,7 +105,7 @@ HWTEST_F(AssertBuilderTest, assert_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(AssertBuilderTest, assert_build_003, TestSize.Level2) +HWTEST_F(AssertBuilderTest, assert_build_003, TestSize.Level1) { m_inputs = {0, 1, 2}; m_outputs = {3}; @@ -124,7 +124,7 @@ HWTEST_F(AssertBuilderTest, assert_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. * @tc.type: FUNC */ -HWTEST_F(AssertBuilderTest, assert_build_004, TestSize.Level2) +HWTEST_F(AssertBuilderTest, assert_build_004, TestSize.Level1) { m_outputs = {2, 3}; m_params = {4}; @@ -142,7 +142,7 @@ HWTEST_F(AssertBuilderTest, assert_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(AssertBuilderTest, assert_build_005, TestSize.Level2) +HWTEST_F(AssertBuilderTest, assert_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -153,7 +153,7 @@ HWTEST_F(AssertBuilderTest, assert_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. 
* @tc.type: FUNC */ -HWTEST_F(AssertBuilderTest, assert_build_006, TestSize.Level2) +HWTEST_F(AssertBuilderTest, assert_build_006, TestSize.Level1) { SetInputTensor(); @@ -166,10 +166,11 @@ HWTEST_F(AssertBuilderTest, assert_build_006, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid summarize's dataType. * @tc.type: FUNC */ -HWTEST_F(AssertBuilderTest, assert_build_007, TestSize.Level2) +HWTEST_F(AssertBuilderTest, assert_build_007, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr summarizeTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_ASSERT_SUMMARIZE); float* summarizeValue = new (std::nothrow) float[1] {0.0f}; @@ -186,7 +187,7 @@ HWTEST_F(AssertBuilderTest, assert_build_007, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid summarize param. * @tc.type: FUNC */ -HWTEST_F(AssertBuilderTest, assert_build_008, TestSize.Level2) +HWTEST_F(AssertBuilderTest, assert_build_008, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -201,10 +202,11 @@ HWTEST_F(AssertBuilderTest, assert_build_008, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for summarize. 
* @tc.type: FUNC */ -HWTEST_F(AssertBuilderTest, assert_build_009, TestSize.Level2) +HWTEST_F(AssertBuilderTest, assert_build_009, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr summarizeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ASSERT_SUMMARIZE); m_allTensors.emplace_back(summarizeTensor); @@ -218,7 +220,7 @@ HWTEST_F(AssertBuilderTest, assert_build_009, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(AssertBuilderTest, assert_getprimitive_001, TestSize.Level2) +HWTEST_F(AssertBuilderTest, assert_getprimitive_001, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -239,7 +241,7 @@ HWTEST_F(AssertBuilderTest, assert_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. * @tc.type: FUNC */ -HWTEST_F(AssertBuilderTest, assert_getprimitive_002, TestSize.Level2) +HWTEST_F(AssertBuilderTest, assert_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/broadcast_to_test.cpp b/test/unittest/ops/broadcast_to_test.cpp index 6cd2748..cb7cbdc 100644 --- a/test/unittest/ops/broadcast_to_test.cpp +++ b/test/unittest/ops/broadcast_to_test.cpp @@ -63,7 +63,7 @@ void BroadcastToBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, * @tc.desc: Verify that the build function returns a successful message. 
* @tc.type: FUNC */ -HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_001, TestSize.Level2) +HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -78,7 +78,7 @@ HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_002, TestSize.Level2) +HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -94,7 +94,7 @@ HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_003, TestSize.Level2) +HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -113,7 +113,7 @@ HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. * @tc.type: FUNC */ -HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_004, TestSize.Level2) +HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_004, TestSize.Level1) { m_outputs = {1, 2}; m_params = {3}; @@ -131,7 +131,7 @@ HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
* @tc.type: FUNC */ -HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_005, TestSize.Level2) +HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -142,7 +142,7 @@ HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_006, TestSize.Level2) +HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); @@ -155,10 +155,11 @@ HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_006, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid shape's dataType. * @tc.type: FUNC */ -HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_007, TestSize.Level2) +HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_007, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr shapeTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_BROADCAST_TO_SHAPE); float* shapeValue = new (std::nothrow) float[2] {2.0f, 3.0f}; @@ -176,7 +177,7 @@ HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_007, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid param. 
* @tc.type: FUNC */ -HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_008, TestSize.Level2) +HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_008, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -191,10 +192,11 @@ HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_008, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for shape. * @tc.type: FUNC */ -HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_009, TestSize.Level2) +HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_009, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr shapeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_BROADCAST_TO_SHAPE); m_allTensors.emplace_back(shapeTensor); @@ -208,7 +210,7 @@ HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_009, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(BroadcastToBuilderTest, broadcast_to_getprimitive_001, TestSize.Level2) +HWTEST_F(BroadcastToBuilderTest, broadcast_to_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -232,7 +234,7 @@ HWTEST_F(BroadcastToBuilderTest, broadcast_to_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(BroadcastToBuilderTest, broadcast_to_getprimitive_002, TestSize.Level2) +HWTEST_F(BroadcastToBuilderTest, broadcast_to_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/clip_test.cpp b/test/unittest/ops/clip_test.cpp index 62a3010..e26d4bb 100644 --- a/test/unittest/ops/clip_test.cpp +++ b/test/unittest/ops/clip_test.cpp @@ -73,7 +73,7 @@ void ClipBuilderTest::SaveMin(OH_NN_DataType dataType, * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(ClipBuilderTest, clip_build_001, TestSize.Level2) +HWTEST_F(ClipBuilderTest, clip_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -89,7 +89,7 @@ HWTEST_F(ClipBuilderTest, clip_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(ClipBuilderTest, clip_build_002, TestSize.Level2) +HWTEST_F(ClipBuilderTest, clip_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -106,7 +106,7 @@ HWTEST_F(ClipBuilderTest, clip_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(ClipBuilderTest, clip_build_003, TestSize.Level2) +HWTEST_F(ClipBuilderTest, clip_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -126,7 +126,7 @@ HWTEST_F(ClipBuilderTest, clip_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(ClipBuilderTest, clip_build_004, TestSize.Level2) +HWTEST_F(ClipBuilderTest, clip_build_004, TestSize.Level1) { m_outputs = {1, 2}; m_params = {3, 4}; @@ -145,7 +145,7 @@ HWTEST_F(ClipBuilderTest, clip_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(ClipBuilderTest, clip_build_005, TestSize.Level2) +HWTEST_F(ClipBuilderTest, clip_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -156,7 +156,7 @@ HWTEST_F(ClipBuilderTest, clip_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(ClipBuilderTest, clip_build_006, TestSize.Level2) +HWTEST_F(ClipBuilderTest, clip_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -169,10 +169,11 @@ HWTEST_F(ClipBuilderTest, clip_build_006, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid max's dataType. * @tc.type: FUNC */ -HWTEST_F(ClipBuilderTest, clip_build_007, TestSize.Level2) +HWTEST_F(ClipBuilderTest, clip_build_007, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr maxTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_CLIP_MAX); int64_t* maxValue = new (std::nothrow) int64_t [1]{10}; @@ -191,11 +192,11 @@ HWTEST_F(ClipBuilderTest, clip_build_007, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid min's dataType. 
* @tc.type: FUNC */ -HWTEST_F(ClipBuilderTest, clip_build_008, TestSize.Level2) +HWTEST_F(ClipBuilderTest, clip_build_008, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - + SaveMax(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_CLIP_MAX); std::shared_ptr minTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_CLIP_MIN); @@ -214,7 +215,7 @@ HWTEST_F(ClipBuilderTest, clip_build_008, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid max param. * @tc.type: FUNC */ -HWTEST_F(ClipBuilderTest, clip_build_009, TestSize.Level2) +HWTEST_F(ClipBuilderTest, clip_build_009, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -231,7 +232,7 @@ HWTEST_F(ClipBuilderTest, clip_build_009, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid min param. * @tc.type: FUNC */ -HWTEST_F(ClipBuilderTest, clip_build_010, TestSize.Level2) +HWTEST_F(ClipBuilderTest, clip_build_010, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -248,7 +249,7 @@ HWTEST_F(ClipBuilderTest, clip_build_010, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for max. * @tc.type: FUNC */ -HWTEST_F(ClipBuilderTest, clip_build_011, TestSize.Level2) +HWTEST_F(ClipBuilderTest, clip_build_011, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -267,7 +268,7 @@ HWTEST_F(ClipBuilderTest, clip_build_011, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for min. 
* @tc.type: FUNC */ -HWTEST_F(ClipBuilderTest, clip_build_012, TestSize.Level2) +HWTEST_F(ClipBuilderTest, clip_build_012, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -286,7 +287,7 @@ HWTEST_F(ClipBuilderTest, clip_build_012, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(ClipBuilderTest, clip_getprimitive_001, TestSize.Level2) +HWTEST_F(ClipBuilderTest, clip_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -311,7 +312,7 @@ HWTEST_F(ClipBuilderTest, clip_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. * @tc.type: FUNC */ -HWTEST_F(ClipBuilderTest, clip_getprimitive_002, TestSize.Level2) +HWTEST_F(ClipBuilderTest, clip_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/constant_of_shape_test.cpp b/test/unittest/ops/constant_of_shape_test.cpp index a5d7643..e52e1c7 100644 --- a/test/unittest/ops/constant_of_shape_test.cpp +++ b/test/unittest/ops/constant_of_shape_test.cpp @@ -75,7 +75,7 @@ void ConstantOfShapeBuilderTest::SaveValue(OH_NN_DataType dataType, * @tc.desc: Verify that the build function returns a successful message. 
* @tc.type: FUNC */ -HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_001, TestSize.Level2) +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -91,7 +91,7 @@ HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_001, TestSize.Level * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_002, TestSize.Level2) +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -108,7 +108,7 @@ HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_002, TestSize.Level * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_003, TestSize.Level2) +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -128,7 +128,7 @@ HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_003, TestSize.Level * @tc.desc: Verify that the build function returns a failed message with invalided output. * @tc.type: FUNC */ -HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_004, TestSize.Level2) +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_004, TestSize.Level1) { m_outputs = {1, 2}; m_params = {3, 4}; @@ -147,7 +147,7 @@ HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_004, TestSize.Level * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
* @tc.type: FUNC */ -HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_005, TestSize.Level2) +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -158,7 +158,7 @@ HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_005, TestSize.Level * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_006, TestSize.Level2) +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); @@ -171,10 +171,11 @@ HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_006, TestSize.Level * @tc.desc: Verify that the build function returns a failed message with invalid dataType's dataType. * @tc.type: FUNC */ -HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_007, TestSize.Level2) +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_007, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr dataTypeTensor = TransToNNTensor(OH_NN_FLOAT32, m_dataTypeDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE); float* dataTypeValue = new (std::nothrow) float [1]{0.0f}; @@ -192,10 +193,11 @@ HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_007, TestSize.Level * @tc.desc: Verify that the build function returns a failed message with invalid value's dataType. 
* @tc.type: FUNC */ -HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_008, TestSize.Level2) +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_008, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveDataType(OH_NN_INT64, m_dataTypeDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE); std::shared_ptr valueTensor = TransToNNTensor(OH_NN_INT64, m_valueDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_VALUE); @@ -215,7 +217,7 @@ HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_008, TestSize.Level * @tc.desc: Verify that the build function returns a failed message with passing invalid dataType param. * @tc.type: FUNC */ -HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_009, TestSize.Level2) +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_009, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -231,7 +233,7 @@ HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_009, TestSize.Level * @tc.desc: Verify that the build function returns a failed message with passing invalid value param. * @tc.type: FUNC */ -HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_010, TestSize.Level2) +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_010, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -247,10 +249,11 @@ HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_010, TestSize.Level * @tc.desc: Verify that the build function returns a failed message without set buffer for dataType. 
* @tc.type: FUNC */ -HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_011, TestSize.Level2) +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_011, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr dataTypeTensor = TransToNNTensor(OH_NN_INT64, m_dataTypeDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE); m_allTensors.emplace_back(dataTypeTensor); @@ -265,10 +268,11 @@ HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_011, TestSize.Level * @tc.desc: Verify that the build function returns a failed message without set buffer for value. * @tc.type: FUNC */ -HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_012, TestSize.Level2) +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_012, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveDataType(OH_NN_INT64, m_dataTypeDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE); std::shared_ptr valueTensor = TransToNNTensor(OH_NN_FLOAT32, m_valueDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_VALUE); @@ -283,7 +287,7 @@ HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_012, TestSize.Level * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_getprimitive_001, TestSize.Level2) +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -311,7 +315,7 @@ HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_getprimitive_001, TestSiz * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_getprimitive_002, TestSize.Level2) +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/cos_test.cpp b/test/unittest/ops/cos_test.cpp index 3db9623..a6ccdcd 100644 --- a/test/unittest/ops/cos_test.cpp +++ b/test/unittest/ops/cos_test.cpp @@ -45,7 +45,7 @@ void CosBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(CosBuilderTest, cos_build_001, TestSize.Level2) +HWTEST_F(CosBuilderTest, cos_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -59,7 +59,7 @@ HWTEST_F(CosBuilderTest, cos_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(CosBuilderTest, cos_build_002, TestSize.Level2) +HWTEST_F(CosBuilderTest, cos_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -74,7 +74,7 @@ HWTEST_F(CosBuilderTest, cos_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(CosBuilderTest, cos_build_003, TestSize.Level2) +HWTEST_F(CosBuilderTest, cos_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -91,7 +91,7 @@ HWTEST_F(CosBuilderTest, cos_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(CosBuilderTest, cos_build_004, TestSize.Level2) +HWTEST_F(CosBuilderTest, cos_build_004, TestSize.Level1) { m_outputs = {1, 2}; @@ -107,7 +107,7 @@ HWTEST_F(CosBuilderTest, cos_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(CosBuilderTest, cos_build_005, TestSize.Level2) +HWTEST_F(CosBuilderTest, cos_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -118,7 +118,7 @@ HWTEST_F(CosBuilderTest, cos_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(CosBuilderTest, cos_build_006, TestSize.Level2) +HWTEST_F(CosBuilderTest, cos_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -131,7 +131,7 @@ HWTEST_F(CosBuilderTest, cos_build_006, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(CosBuilderTest, cos_getprimitive_001, TestSize.Level2) +HWTEST_F(CosBuilderTest, cos_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -147,7 +147,7 @@ HWTEST_F(CosBuilderTest, cos_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(CosBuilderTest, cos_getprimitive_002, TestSize.Level2) +HWTEST_F(CosBuilderTest, cos_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/depth_to_space_test.cpp b/test/unittest/ops/depth_to_space_test.cpp index 6412f6c..77984f1 100644 --- a/test/unittest/ops/depth_to_space_test.cpp +++ b/test/unittest/ops/depth_to_space_test.cpp @@ -92,7 +92,7 @@ void DepthToSpaceBuilderTest::SaveMode(OH_NN_DataType dataType, * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_001, TestSize.Level2) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -110,7 +110,7 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_002, TestSize.Level2) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -129,7 +129,7 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. 
* @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_003, TestSize.Level2) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -151,7 +151,7 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. * @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_004, TestSize.Level2) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_004, TestSize.Level1) { m_outputs = {1, 2}; m_params = {3, 4, 5}; @@ -172,7 +172,7 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_005, TestSize.Level2) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -183,7 +183,7 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_006, TestSize.Level2) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); @@ -196,10 +196,11 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_006, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid blockSize's dataType. 
* @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_007, TestSize.Level2) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_007, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr blockSizeTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); float* blockSizeValue = new (std::nothrow) float [1]{2.0f}; @@ -220,10 +221,11 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_007, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid format's dataType. * @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_008, TestSize.Level2) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_008, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); std::shared_ptr formatTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); @@ -243,10 +245,11 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_008, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid mode's dataType. 
* @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_009, TestSize.Level2) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_009, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); modeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); @@ -264,7 +267,7 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_009, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid blockSize param. * @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_010, TestSize.Level2) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_010, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -282,7 +285,7 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_010, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid format param. * @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_011, TestSize.Level2) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_011, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -300,7 +303,7 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_011, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid mode param. 
* @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_012, TestSize.Level2) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_012, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -318,10 +321,11 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_012, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for blockSize. * @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_013, TestSize.Level2) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_013, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr blockSizeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); m_allTensors.emplace_back(blockSizeTensor); @@ -338,10 +342,11 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_013, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for format. * @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_014, TestSize.Level2) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_014, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); std::shared_ptr formatTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); @@ -358,10 +363,11 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_014, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for mode. 
* @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_015, TestSize.Level2) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_015, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); modeTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); @@ -376,7 +382,7 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_015, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_getprimitive_001, TestSize.Level2) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -407,7 +413,7 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_getprimitive_001, TestSize.Leve * @tc.desc: Verify that the getPrimitive function returns a failed message without build. * @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_getprimitive_002, TestSize.Level2) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/equal_builder_test.cpp b/test/unittest/ops/equal_builder_test.cpp index e30325b..7924880 100644 --- a/test/unittest/ops/equal_builder_test.cpp +++ b/test/unittest/ops/equal_builder_test.cpp @@ -47,7 +47,7 @@ void EqualBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. 
* @tc.type: FUNC */ -HWTEST_F(EqualBuilderTest, equal_build_001, TestSize.Level0) +HWTEST_F(EqualBuilderTest, equal_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -61,7 +61,7 @@ HWTEST_F(EqualBuilderTest, equal_build_001, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(EqualBuilderTest, equal_build_002, TestSize.Level0) +HWTEST_F(EqualBuilderTest, equal_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -76,7 +76,7 @@ HWTEST_F(EqualBuilderTest, equal_build_002, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(EqualBuilderTest, equal_build_003, TestSize.Level0) +HWTEST_F(EqualBuilderTest, equal_build_003, TestSize.Level1) { m_inputs = {0, 1, 2, 3}; m_outputs = {4}; @@ -93,7 +93,7 @@ HWTEST_F(EqualBuilderTest, equal_build_003, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with invalided output. * @tc.type: FUNC */ -HWTEST_F(EqualBuilderTest, equal_build_004, TestSize.Level0) +HWTEST_F(EqualBuilderTest, equal_build_004, TestSize.Level1) { std::vector m_outputs = {2, 3, 4}; @@ -109,7 +109,7 @@ HWTEST_F(EqualBuilderTest, equal_build_004, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
* @tc.type: FUNC */ -HWTEST_F(EqualBuilderTest, equal_build_005, TestSize.Level0) +HWTEST_F(EqualBuilderTest, equal_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -120,7 +120,7 @@ HWTEST_F(EqualBuilderTest, equal_build_005, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(EqualBuilderTest, equal_build_006, TestSize.Level0) +HWTEST_F(EqualBuilderTest, equal_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); @@ -133,7 +133,7 @@ HWTEST_F(EqualBuilderTest, equal_build_006, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with a virtual parameter. * @tc.type: FUNC */ -HWTEST_F(EqualBuilderTest, equal_build_007, TestSize.Level0) +HWTEST_F(EqualBuilderTest, equal_build_007, TestSize.Level1) { std::vector m_params = {3}; std::vector paramDim = {}; @@ -153,7 +153,7 @@ HWTEST_F(EqualBuilderTest, equal_build_007, TestSize.Level0) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(EqualBuilderTest, equal_getprimitive_001, TestSize.Level0) +HWTEST_F(EqualBuilderTest, equal_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -169,7 +169,7 @@ HWTEST_F(EqualBuilderTest, equal_getprimitive_001, TestSize.Level0) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(EqualBuilderTest, equal_getprimitive_002, TestSize.Level0) +HWTEST_F(EqualBuilderTest, equal_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/erf_test.cpp b/test/unittest/ops/erf_test.cpp index 1aeedc6..c81b320 100644 --- a/test/unittest/ops/erf_test.cpp +++ b/test/unittest/ops/erf_test.cpp @@ -45,7 +45,7 @@ void ErfBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(ErfBuilderTest, erf_build_001, TestSize.Level2) +HWTEST_F(ErfBuilderTest, erf_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -59,7 +59,7 @@ HWTEST_F(ErfBuilderTest, erf_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(ErfBuilderTest, erf_build_002, TestSize.Level2) +HWTEST_F(ErfBuilderTest, erf_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -74,7 +74,7 @@ HWTEST_F(ErfBuilderTest, erf_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(ErfBuilderTest, erf_build_003, TestSize.Level2) +HWTEST_F(ErfBuilderTest, erf_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -91,7 +91,7 @@ HWTEST_F(ErfBuilderTest, erf_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(ErfBuilderTest, erf_build_004, TestSize.Level2) +HWTEST_F(ErfBuilderTest, erf_build_004, TestSize.Level1) { m_outputs = {1, 2}; @@ -107,7 +107,7 @@ HWTEST_F(ErfBuilderTest, erf_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(ErfBuilderTest, erf_build_005, TestSize.Level2) +HWTEST_F(ErfBuilderTest, erf_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -118,7 +118,7 @@ HWTEST_F(ErfBuilderTest, erf_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(ErfBuilderTest, erf_build_006, TestSize.Level2) +HWTEST_F(ErfBuilderTest, erf_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -131,7 +131,7 @@ HWTEST_F(ErfBuilderTest, erf_build_006, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(ErfBuilderTest, erf_getprimitive_001, TestSize.Level2) +HWTEST_F(ErfBuilderTest, erf_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -147,7 +147,7 @@ HWTEST_F(ErfBuilderTest, erf_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(ErfBuilderTest, erf_getprimitive_002, TestSize.Level2) +HWTEST_F(ErfBuilderTest, erf_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/exp_test.cpp b/test/unittest/ops/exp_test.cpp index 401bac9..d5c6ead 100644 --- a/test/unittest/ops/exp_test.cpp +++ b/test/unittest/ops/exp_test.cpp @@ -85,7 +85,7 @@ void ExpBuilderTest::SaveShift(OH_NN_DataType dataType, * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(ExpBuilderTest, exp_build_001, TestSize.Level2) +HWTEST_F(ExpBuilderTest, exp_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -102,7 +102,7 @@ HWTEST_F(ExpBuilderTest, exp_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(ExpBuilderTest, exp_build_002, TestSize.Level2) +HWTEST_F(ExpBuilderTest, exp_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -120,7 +120,7 @@ HWTEST_F(ExpBuilderTest, exp_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(ExpBuilderTest, exp_build_003, TestSize.Level2) +HWTEST_F(ExpBuilderTest, exp_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -141,7 +141,7 @@ HWTEST_F(ExpBuilderTest, exp_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(ExpBuilderTest, exp_build_004, TestSize.Level2) +HWTEST_F(ExpBuilderTest, exp_build_004, TestSize.Level1) { m_outputs = {1, 2}; m_params = {3, 4, 5}; @@ -161,7 +161,7 @@ HWTEST_F(ExpBuilderTest, exp_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(ExpBuilderTest, exp_build_005, TestSize.Level2) +HWTEST_F(ExpBuilderTest, exp_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -172,7 +172,7 @@ HWTEST_F(ExpBuilderTest, exp_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(ExpBuilderTest, exp_build_006, TestSize.Level2) +HWTEST_F(ExpBuilderTest, exp_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -185,10 +185,11 @@ HWTEST_F(ExpBuilderTest, exp_build_006, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid base's dataType. * @tc.type: FUNC */ -HWTEST_F(ExpBuilderTest, exp_build_007, TestSize.Level2) +HWTEST_F(ExpBuilderTest, exp_build_007, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr baseTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_EXP_BASE); int64_t* baseValue = new (std::nothrow) int64_t [1]{-1}; @@ -208,7 +209,7 @@ HWTEST_F(ExpBuilderTest, exp_build_007, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid scale's dataType. 
* @tc.type: FUNC */ -HWTEST_F(ExpBuilderTest, exp_build_008, TestSize.Level2) +HWTEST_F(ExpBuilderTest, exp_build_008, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -232,7 +233,7 @@ HWTEST_F(ExpBuilderTest, exp_build_008, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid base's dataType. * @tc.type: FUNC */ -HWTEST_F(ExpBuilderTest, exp_build_009, TestSize.Level2) +HWTEST_F(ExpBuilderTest, exp_build_009, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -256,7 +257,7 @@ HWTEST_F(ExpBuilderTest, exp_build_009, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid base param. * @tc.type: FUNC */ -HWTEST_F(ExpBuilderTest, exp_build_010, TestSize.Level2) +HWTEST_F(ExpBuilderTest, exp_build_010, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -274,7 +275,7 @@ HWTEST_F(ExpBuilderTest, exp_build_010, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid scale param. * @tc.type: FUNC */ -HWTEST_F(ExpBuilderTest, exp_build_011, TestSize.Level2) +HWTEST_F(ExpBuilderTest, exp_build_011, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -292,7 +293,7 @@ HWTEST_F(ExpBuilderTest, exp_build_011, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid shift param. 
* @tc.type: FUNC */ -HWTEST_F(ExpBuilderTest, exp_build_012, TestSize.Level2) +HWTEST_F(ExpBuilderTest, exp_build_012, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -310,7 +311,7 @@ HWTEST_F(ExpBuilderTest, exp_build_012, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for base. * @tc.type: FUNC */ -HWTEST_F(ExpBuilderTest, exp_build_013, TestSize.Level2) +HWTEST_F(ExpBuilderTest, exp_build_013, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -321,7 +322,6 @@ HWTEST_F(ExpBuilderTest, exp_build_013, TestSize.Level2) SaveScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SCALE); SaveShift(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SHIFT); - OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } @@ -331,7 +331,7 @@ HWTEST_F(ExpBuilderTest, exp_build_013, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for scale. * @tc.type: FUNC */ -HWTEST_F(ExpBuilderTest, exp_build_014, TestSize.Level2) +HWTEST_F(ExpBuilderTest, exp_build_014, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -352,7 +352,7 @@ HWTEST_F(ExpBuilderTest, exp_build_014, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for shift. 
* @tc.type: FUNC */ -HWTEST_F(ExpBuilderTest, exp_build_015, TestSize.Level2) +HWTEST_F(ExpBuilderTest, exp_build_015, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -372,7 +372,7 @@ HWTEST_F(ExpBuilderTest, exp_build_015, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(ExpBuilderTest, exp_getprimitive_001, TestSize.Level2) +HWTEST_F(ExpBuilderTest, exp_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -401,7 +401,7 @@ HWTEST_F(ExpBuilderTest, exp_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. * @tc.type: FUNC */ -HWTEST_F(ExpBuilderTest, exp_getprimitive_002, TestSize.Level2) +HWTEST_F(ExpBuilderTest, exp_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/flatten_test.cpp b/test/unittest/ops/flatten_test.cpp index 3bf07ac..cf61344 100644 --- a/test/unittest/ops/flatten_test.cpp +++ b/test/unittest/ops/flatten_test.cpp @@ -62,7 +62,7 @@ void FlattenBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(FlattenBuilderTest, flatten_build_001, TestSize.Level0) +HWTEST_F(FlattenBuilderTest, flatten_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -77,7 +77,7 @@ HWTEST_F(FlattenBuilderTest, flatten_build_001, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
* @tc.type: FUNC */ -HWTEST_F(FlattenBuilderTest, flatten_build_002, TestSize.Level0) +HWTEST_F(FlattenBuilderTest, flatten_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -93,7 +93,7 @@ HWTEST_F(FlattenBuilderTest, flatten_build_002, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(FlattenBuilderTest, flatten_build_003, TestSize.Level0) +HWTEST_F(FlattenBuilderTest, flatten_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -112,7 +112,7 @@ HWTEST_F(FlattenBuilderTest, flatten_build_003, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with invalided output. * @tc.type: FUNC */ -HWTEST_F(FlattenBuilderTest, flatten_build_004, TestSize.Level0) +HWTEST_F(FlattenBuilderTest, flatten_build_004, TestSize.Level1) { m_outputs = {1, 2}; m_params = {3}; @@ -130,7 +130,7 @@ HWTEST_F(FlattenBuilderTest, flatten_build_004, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(FlattenBuilderTest, flatten_build_005, TestSize.Level0) +HWTEST_F(FlattenBuilderTest, flatten_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -141,7 +141,7 @@ HWTEST_F(FlattenBuilderTest, flatten_build_005, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message without output tensor. 
* @tc.type: FUNC */ -HWTEST_F(FlattenBuilderTest, flatten_build_006, TestSize.Level0) +HWTEST_F(FlattenBuilderTest, flatten_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); @@ -154,10 +154,11 @@ HWTEST_F(FlattenBuilderTest, flatten_build_006, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with invalid axis's dataType. * @tc.type: FUNC */ -HWTEST_F(FlattenBuilderTest, flatten_build_007, TestSize.Level0) +HWTEST_F(FlattenBuilderTest, flatten_build_007, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_FLATTEN_AXIS); float *axisValue = new (std::nothrow) float[1]{1.0f}; @@ -175,7 +176,7 @@ HWTEST_F(FlattenBuilderTest, flatten_build_007, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with passing invalid param. * @tc.type: FUNC */ -HWTEST_F(FlattenBuilderTest, flatten_build_008, TestSize.Level0) +HWTEST_F(FlattenBuilderTest, flatten_build_008, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -190,10 +191,11 @@ HWTEST_F(FlattenBuilderTest, flatten_build_008, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message without set buffer for axis. 
* @tc.type: FUNC */ -HWTEST_F(FlattenBuilderTest, flatten_build_009, TestSize.Level0) +HWTEST_F(FlattenBuilderTest, flatten_build_009, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_FLATTEN_AXIS); m_allTensors.emplace_back(axisTensor); @@ -207,7 +209,7 @@ HWTEST_F(FlattenBuilderTest, flatten_build_009, TestSize.Level0) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(FlattenBuilderTest, flatten_getprimitive_001, TestSize.Level0) +HWTEST_F(FlattenBuilderTest, flatten_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -228,7 +230,7 @@ HWTEST_F(FlattenBuilderTest, flatten_getprimitive_001, TestSize.Level0) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. * @tc.type: FUNC */ -HWTEST_F(FlattenBuilderTest, flatten_getprimitive_002, TestSize.Level0) +HWTEST_F(FlattenBuilderTest, flatten_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/greater_builder_test.cpp b/test/unittest/ops/greater_builder_test.cpp index 8da4c9e..16a6ff9 100644 --- a/test/unittest/ops/greater_builder_test.cpp +++ b/test/unittest/ops/greater_builder_test.cpp @@ -47,7 +47,7 @@ void GreaterBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. 
* @tc.type: FUNC */ -HWTEST_F(GreaterBuilderTest, greater_build_001, TestSize.Level0) +HWTEST_F(GreaterBuilderTest, greater_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -61,7 +61,7 @@ HWTEST_F(GreaterBuilderTest, greater_build_001, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(GreaterBuilderTest, greater_build_002, TestSize.Level0) +HWTEST_F(GreaterBuilderTest, greater_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -76,7 +76,7 @@ HWTEST_F(GreaterBuilderTest, greater_build_002, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(GreaterBuilderTest, greater_build_003, TestSize.Level0) +HWTEST_F(GreaterBuilderTest, greater_build_003, TestSize.Level1) { m_inputs = {0, 1, 2, 3}; m_outputs = {4}; @@ -93,7 +93,7 @@ HWTEST_F(GreaterBuilderTest, greater_build_003, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with invalided output. * @tc.type: FUNC */ -HWTEST_F(GreaterBuilderTest, greater_build_004, TestSize.Level0) +HWTEST_F(GreaterBuilderTest, greater_build_004, TestSize.Level1) { std::vector m_outputs = {2, 3, 4}; @@ -109,7 +109,7 @@ HWTEST_F(GreaterBuilderTest, greater_build_004, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
* @tc.type: FUNC */ -HWTEST_F(GreaterBuilderTest, greater_build_005, TestSize.Level0) +HWTEST_F(GreaterBuilderTest, greater_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -120,7 +120,7 @@ HWTEST_F(GreaterBuilderTest, greater_build_005, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(GreaterBuilderTest, greater_build_006, TestSize.Level0) +HWTEST_F(GreaterBuilderTest, greater_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); @@ -133,7 +133,7 @@ HWTEST_F(GreaterBuilderTest, greater_build_006, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with a virtual parameter. * @tc.type: FUNC */ -HWTEST_F(GreaterBuilderTest, greater_build_007, TestSize.Level0) +HWTEST_F(GreaterBuilderTest, greater_build_007, TestSize.Level1) { std::vector m_params = {3}; std::vector paramDim = {}; @@ -153,7 +153,7 @@ HWTEST_F(GreaterBuilderTest, greater_build_007, TestSize.Level0) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(GreaterBuilderTest, greater_getprimitive_001, TestSize.Level0) +HWTEST_F(GreaterBuilderTest, greater_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -169,7 +169,7 @@ HWTEST_F(GreaterBuilderTest, greater_getprimitive_001, TestSize.Level0) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(GreaterBuilderTest, greater_getprimitive_002, TestSize.Level0) +HWTEST_F(GreaterBuilderTest, greater_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/greater_equal_builder_test.cpp b/test/unittest/ops/greater_equal_builder_test.cpp index 373613d..6b9e5e5 100644 --- a/test/unittest/ops/greater_equal_builder_test.cpp +++ b/test/unittest/ops/greater_equal_builder_test.cpp @@ -47,7 +47,7 @@ void GreaterEqualBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_001, TestSize.Level0) +HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -61,7 +61,7 @@ HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_001, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_002, TestSize.Level0) +HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -76,7 +76,7 @@ HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_002, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with invalided input. 
* @tc.type: FUNC */ -HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_003, TestSize.Level0) +HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_003, TestSize.Level1) { m_inputs = {0, 1, 2, 3}; m_outputs = {4}; @@ -93,7 +93,7 @@ HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_003, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with invalided output. * @tc.type: FUNC */ -HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_004, TestSize.Level0) +HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_004, TestSize.Level1) { std::vector m_outputs = {2, 3, 4}; @@ -109,7 +109,7 @@ HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_004, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_005, TestSize.Level0) +HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -120,7 +120,7 @@ HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_005, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_006, TestSize.Level0) +HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); @@ -133,7 +133,7 @@ HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_006, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with a virtual parameter. 
* @tc.type: FUNC */ -HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_007, TestSize.Level0) +HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_007, TestSize.Level1) { std::vector m_params = {3}; std::vector paramDim = {}; @@ -153,7 +153,7 @@ HWTEST_F(GreaterEqualBuilderTest, greaterEqual_build_007, TestSize.Level0) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(GreaterEqualBuilderTest, greaterEqual_getprimitive_001, TestSize.Level0) +HWTEST_F(GreaterEqualBuilderTest, greaterEqual_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -169,7 +169,7 @@ HWTEST_F(GreaterEqualBuilderTest, greaterEqual_getprimitive_001, TestSize.Level0 * @tc.desc: Verify that the getPrimitive function returns a failed message without build. * @tc.type: FUNC */ -HWTEST_F(GreaterEqualBuilderTest, greaterEqual_getprimitive_002, TestSize.Level0) +HWTEST_F(GreaterEqualBuilderTest, greaterEqual_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/instance_norm_test.cpp b/test/unittest/ops/instance_norm_test.cpp index 9f7e838..1c4aff3 100644 --- a/test/unittest/ops/instance_norm_test.cpp +++ b/test/unittest/ops/instance_norm_test.cpp @@ -80,7 +80,7 @@ void InstanceNormBuilderTest::SetInputTensor() * @tc.desc: Verify that the build function returns a successful message. 
* @tc.type: FUNC */ -HWTEST_F(InstanceNormBuilderTest, instance_norm_build_001, TestSize.Level2) +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_001, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -95,7 +95,7 @@ HWTEST_F(InstanceNormBuilderTest, instance_norm_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(InstanceNormBuilderTest, instance_norm_build_002, TestSize.Level2) +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_002, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -111,7 +111,7 @@ HWTEST_F(InstanceNormBuilderTest, instance_norm_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(InstanceNormBuilderTest, instance_norm_build_003, TestSize.Level2) +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_003, TestSize.Level1) { m_inputs = {0, 1, 2, 3}; m_outputs = {4}; @@ -130,7 +130,7 @@ HWTEST_F(InstanceNormBuilderTest, instance_norm_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. * @tc.type: FUNC */ -HWTEST_F(InstanceNormBuilderTest, instance_norm_build_004, TestSize.Level2) +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_004, TestSize.Level1) { m_outputs = {3, 4}; m_params = {5}; @@ -148,7 +148,7 @@ HWTEST_F(InstanceNormBuilderTest, instance_norm_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
* @tc.type: FUNC */ -HWTEST_F(InstanceNormBuilderTest, instance_norm_build_005, TestSize.Level2) +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -159,7 +159,7 @@ HWTEST_F(InstanceNormBuilderTest, instance_norm_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(InstanceNormBuilderTest, instance_norm_build_006, TestSize.Level2) +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_006, TestSize.Level1) { SetInputTensor(); @@ -172,10 +172,11 @@ HWTEST_F(InstanceNormBuilderTest, instance_norm_build_006, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid epsilon's dataType. * @tc.type: FUNC */ -HWTEST_F(InstanceNormBuilderTest, instance_norm_build_007, TestSize.Level2) +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_007, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr epsilonTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_INSTANCE_NORM_EPSILON); int64_t* epsilonValue = new (std::nothrow) int64_t [1]{0.0f}; @@ -193,7 +194,7 @@ HWTEST_F(InstanceNormBuilderTest, instance_norm_build_007, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid epsilon param. * @tc.type: FUNC */ -HWTEST_F(InstanceNormBuilderTest, instance_norm_build_008, TestSize.Level2) +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_008, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -208,10 +209,11 @@ HWTEST_F(InstanceNormBuilderTest, instance_norm_build_008, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for epsilon. 
* @tc.type: FUNC */ -HWTEST_F(InstanceNormBuilderTest, instance_norm_build_009, TestSize.Level2) +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_009, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr epsilonTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_INSTANCE_NORM_EPSILON); m_allTensors.emplace_back(epsilonTensor); @@ -225,7 +227,7 @@ HWTEST_F(InstanceNormBuilderTest, instance_norm_build_009, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(InstanceNormBuilderTest, instance_norm_getprimitive_001, TestSize.Level2) +HWTEST_F(InstanceNormBuilderTest, instance_norm_getprimitive_001, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -246,7 +248,7 @@ HWTEST_F(InstanceNormBuilderTest, instance_norm_getprimitive_001, TestSize.Level * @tc.desc: Verify that the getPrimitive function returns a failed message without build. * @tc.type: FUNC */ -HWTEST_F(InstanceNormBuilderTest, instance_norm_getprimitive_002, TestSize.Level2) +HWTEST_F(InstanceNormBuilderTest, instance_norm_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/leaky_relu_test.cpp b/test/unittest/ops/leaky_relu_test.cpp index e47b0de..b6fab00 100644 --- a/test/unittest/ops/leaky_relu_test.cpp +++ b/test/unittest/ops/leaky_relu_test.cpp @@ -61,7 +61,7 @@ void LeakyReluBuilderTest::SaveNegativeSlope(OH_NN_DataType dataType, * @tc.desc: Verify that the build function returns a successful message. 
* @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_001, TestSize.Level2) +HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -76,7 +76,7 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_002, TestSize.Level2) +HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -92,7 +92,7 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_003, TestSize.Level2) +HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -111,7 +111,7 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. * @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_004, TestSize.Level2) +HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_004, TestSize.Level1) { m_outputs = {1, 2}; m_params = {3}; @@ -129,7 +129,7 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
* @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_005, TestSize.Level2) +HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -140,7 +140,7 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_006, TestSize.Level2) +HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -153,10 +153,11 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_006, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid negative_slope's dataType. * @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_007, TestSize.Level2) +HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_007, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr negativeSlopeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LEAKY_RELU_NEGATIVE_SLOPE); int64_t* negativeSlopeValue = new (std::nothrow) int64_t [1]{0}; @@ -174,7 +175,7 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_007, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid negative_slope param. 
* @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_008, TestSize.Level2) +HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_008, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -190,7 +191,7 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_008, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for negative_slope. * @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_011, TestSize.Level2) +HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_011, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -204,11 +205,11 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_011, TestSize.Level2) } /** - * @tc.name: negative_slope_getprimitive_001 + * @tc.name: reaky_relu_getprimitive_001 * @tc.desc: Verify that the getPrimitive function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, negative_slope_getprimitive_001, TestSize.Level2) +HWTEST_F(LeakyReluBuilderTest, reaky_relu_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -225,11 +226,11 @@ HWTEST_F(LeakyReluBuilderTest, negative_slope_getprimitive_001, TestSize.Level2) } /** - * @tc.name: negative_slope_getprimitive_002 + * @tc.name: reaky_relu_getprimitive_002 * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, negative_slope_getprimitive_002, TestSize.Level2) +HWTEST_F(LeakyReluBuilderTest, reaky_relu_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/less_test.cpp b/test/unittest/ops/less_test.cpp index 5d00289..1080f37 100644 --- a/test/unittest/ops/less_test.cpp +++ b/test/unittest/ops/less_test.cpp @@ -45,7 +45,7 @@ void LessBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(LessBuilderTest, less_build_001, TestSize.Level2) +HWTEST_F(LessBuilderTest, less_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -59,7 +59,7 @@ HWTEST_F(LessBuilderTest, less_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(LessBuilderTest, less_build_002, TestSize.Level2) +HWTEST_F(LessBuilderTest, less_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -74,7 +74,7 @@ HWTEST_F(LessBuilderTest, less_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(LessBuilderTest, less_build_003, TestSize.Level2) +HWTEST_F(LessBuilderTest, less_build_003, TestSize.Level1) { m_inputs = {0, 1, 2}; m_outputs = {3}; @@ -91,7 +91,7 @@ HWTEST_F(LessBuilderTest, less_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(LessBuilderTest, less_build_004, TestSize.Level2) +HWTEST_F(LessBuilderTest, less_build_004, TestSize.Level1) { m_outputs = {2, 3}; @@ -107,7 +107,7 @@ HWTEST_F(LessBuilderTest, less_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(LessBuilderTest, less_build_005, TestSize.Level2) +HWTEST_F(LessBuilderTest, less_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -118,7 +118,7 @@ HWTEST_F(LessBuilderTest, less_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(LessBuilderTest, less_build_006, TestSize.Level2) +HWTEST_F(LessBuilderTest, less_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -131,7 +131,7 @@ HWTEST_F(LessBuilderTest, less_build_006, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(LessBuilderTest, less_getprimitive_001, TestSize.Level2) +HWTEST_F(LessBuilderTest, less_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -147,7 +147,7 @@ HWTEST_F(LessBuilderTest, less_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(LessBuilderTest, less_getprimitive_002, TestSize.Level2) +HWTEST_F(LessBuilderTest, less_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/lessequal_builder_test.cpp b/test/unittest/ops/lessequal_builder_test.cpp index 7847762..8a632ff 100644 --- a/test/unittest/ops/lessequal_builder_test.cpp +++ b/test/unittest/ops/lessequal_builder_test.cpp @@ -47,7 +47,7 @@ void LessEqualBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(LessEqualBuilderTest, lessequal_build_001, TestSize.Level0) +HWTEST_F(LessEqualBuilderTest, lessequal_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -61,7 +61,7 @@ HWTEST_F(LessEqualBuilderTest, lessequal_build_001, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(LessEqualBuilderTest, lessequal_build_002, TestSize.Level0) +HWTEST_F(LessEqualBuilderTest, lessequal_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -76,7 +76,7 @@ HWTEST_F(LessEqualBuilderTest, lessequal_build_002, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(LessEqualBuilderTest, lessequal_build_003, TestSize.Level0) +HWTEST_F(LessEqualBuilderTest, lessequal_build_003, TestSize.Level1) { m_inputs = {0, 1, 2, 3}; m_outputs = {4}; @@ -93,7 +93,7 @@ HWTEST_F(LessEqualBuilderTest, lessequal_build_003, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(LessEqualBuilderTest, lessequal_build_004, TestSize.Level0) +HWTEST_F(LessEqualBuilderTest, lessequal_build_004, TestSize.Level1) { std::vector m_outputs = {2, 3, 4}; @@ -109,7 +109,7 @@ HWTEST_F(LessEqualBuilderTest, lessequal_build_004, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(LessEqualBuilderTest, lessequal_build_005, TestSize.Level0) +HWTEST_F(LessEqualBuilderTest, lessequal_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_lessEqual.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -120,7 +120,7 @@ HWTEST_F(LessEqualBuilderTest, lessequal_build_005, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(LessEqualBuilderTest, lessequal_build_006, TestSize.Level0) +HWTEST_F(LessEqualBuilderTest, lessequal_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); @@ -133,7 +133,7 @@ HWTEST_F(LessEqualBuilderTest, lessequal_build_006, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with a virtual parameter. 
* @tc.type: FUNC */ -HWTEST_F(LessEqualBuilderTest, lessequal_build_007, TestSize.Level0) +HWTEST_F(LessEqualBuilderTest, lessequal_build_007, TestSize.Level1) { std::vector m_params = {3}; std::vector paramDim = {}; @@ -153,7 +153,7 @@ HWTEST_F(LessEqualBuilderTest, lessequal_build_007, TestSize.Level0) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(LessEqualBuilderTest, lessequal_getprimitive_001, TestSize.Level0) +HWTEST_F(LessEqualBuilderTest, lessequal_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -169,7 +169,7 @@ HWTEST_F(LessEqualBuilderTest, lessequal_getprimitive_001, TestSize.Level0) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. * @tc.type: FUNC */ -HWTEST_F(LessEqualBuilderTest, lessequal_getprimitive_002, TestSize.Level0) +HWTEST_F(LessEqualBuilderTest, lessequal_getprimitive_002, TestSize.Level1) { LessEqualBuilder lessEqual; LiteGraphPrimitvePtr primitive = m_lessEqual.GetPrimitive(); diff --git a/test/unittest/ops/log_test.cpp b/test/unittest/ops/log_test.cpp index ff9b582..aefc971 100644 --- a/test/unittest/ops/log_test.cpp +++ b/test/unittest/ops/log_test.cpp @@ -45,7 +45,7 @@ void LogBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(LogBuilderTest, log_build_001, TestSize.Level2) +HWTEST_F(LogBuilderTest, log_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -59,7 +59,7 @@ HWTEST_F(LogBuilderTest, log_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
* @tc.type: FUNC */ -HWTEST_F(LogBuilderTest, log_build_002, TestSize.Level2) +HWTEST_F(LogBuilderTest, log_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -74,7 +74,7 @@ HWTEST_F(LogBuilderTest, log_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(LogBuilderTest, log_build_003, TestSize.Level2) +HWTEST_F(LogBuilderTest, log_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -91,7 +91,7 @@ HWTEST_F(LogBuilderTest, log_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. * @tc.type: FUNC */ -HWTEST_F(LogBuilderTest, log_build_004, TestSize.Level2) +HWTEST_F(LogBuilderTest, log_build_004, TestSize.Level1) { m_outputs = {1, 2}; @@ -107,7 +107,7 @@ HWTEST_F(LogBuilderTest, log_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(LogBuilderTest, log_build_005, TestSize.Level2) +HWTEST_F(LogBuilderTest, log_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -118,7 +118,7 @@ HWTEST_F(LogBuilderTest, log_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. 
* @tc.type: FUNC */ -HWTEST_F(LogBuilderTest, log_build_006, TestSize.Level2) +HWTEST_F(LogBuilderTest, log_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -131,7 +131,7 @@ HWTEST_F(LogBuilderTest, log_build_006, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(LogBuilderTest, log_getprimitive_001, TestSize.Level2) +HWTEST_F(LogBuilderTest, log_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -147,7 +147,7 @@ HWTEST_F(LogBuilderTest, log_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. * @tc.type: FUNC */ -HWTEST_F(LogBuilderTest, log_getprimitive_002, TestSize.Level2) +HWTEST_F(LogBuilderTest, log_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/logical_and_test.cpp b/test/unittest/ops/logical_and_test.cpp index ad4e795..ef84bbb 100644 --- a/test/unittest/ops/logical_and_test.cpp +++ b/test/unittest/ops/logical_and_test.cpp @@ -45,7 +45,7 @@ void LogicalAndBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(LogicalAndBuilderTest, logical_and_build_001, TestSize.Level2) +HWTEST_F(LogicalAndBuilderTest, logical_and_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -59,7 +59,7 @@ HWTEST_F(LogicalAndBuilderTest, logical_and_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
* @tc.type: FUNC */ -HWTEST_F(LogicalAndBuilderTest, logical_and_build_002, TestSize.Level2) +HWTEST_F(LogicalAndBuilderTest, logical_and_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -74,7 +74,7 @@ HWTEST_F(LogicalAndBuilderTest, logical_and_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(LogicalAndBuilderTest, logical_and_build_003, TestSize.Level2) +HWTEST_F(LogicalAndBuilderTest, logical_and_build_003, TestSize.Level1) { m_inputs = {0, 1, 2}; m_outputs = {3}; @@ -91,7 +91,7 @@ HWTEST_F(LogicalAndBuilderTest, logical_and_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. * @tc.type: FUNC */ -HWTEST_F(LogicalAndBuilderTest, logical_and_build_004, TestSize.Level2) +HWTEST_F(LogicalAndBuilderTest, logical_and_build_004, TestSize.Level1) { m_outputs = {2, 3}; @@ -107,7 +107,7 @@ HWTEST_F(LogicalAndBuilderTest, logical_and_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(LogicalAndBuilderTest, logical_and_build_005, TestSize.Level2) +HWTEST_F(LogicalAndBuilderTest, logical_and_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -118,7 +118,7 @@ HWTEST_F(LogicalAndBuilderTest, logical_and_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. 
* @tc.type: FUNC */ -HWTEST_F(LogicalAndBuilderTest, logical_and_build_006, TestSize.Level2) +HWTEST_F(LogicalAndBuilderTest, logical_and_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -131,7 +131,7 @@ HWTEST_F(LogicalAndBuilderTest, logical_and_build_006, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(LogicalAndBuilderTest, logical_and_getprimitive_001, TestSize.Level2) +HWTEST_F(LogicalAndBuilderTest, logical_and_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -147,7 +147,7 @@ HWTEST_F(LogicalAndBuilderTest, logical_and_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. * @tc.type: FUNC */ -HWTEST_F(LogicalAndBuilderTest, logical_and_getprimitive_002, TestSize.Level2) +HWTEST_F(LogicalAndBuilderTest, logical_and_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/logical_not_test.cpp b/test/unittest/ops/logical_not_test.cpp index 7c461b6..5917df5 100644 --- a/test/unittest/ops/logical_not_test.cpp +++ b/test/unittest/ops/logical_not_test.cpp @@ -45,7 +45,7 @@ void LogicalNotBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. 
* @tc.type: FUNC */ -HWTEST_F(LogicalNotBuilderTest, logical_not_build_001, TestSize.Level2) +HWTEST_F(LogicalNotBuilderTest, logical_not_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -59,7 +59,7 @@ HWTEST_F(LogicalNotBuilderTest, logical_not_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(LogicalNotBuilderTest, logical_not_build_002, TestSize.Level2) +HWTEST_F(LogicalNotBuilderTest, logical_not_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -74,7 +74,7 @@ HWTEST_F(LogicalNotBuilderTest, logical_not_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(LogicalNotBuilderTest, logical_not_build_003, TestSize.Level2) +HWTEST_F(LogicalNotBuilderTest, logical_not_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -91,7 +91,7 @@ HWTEST_F(LogicalNotBuilderTest, logical_not_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. * @tc.type: FUNC */ -HWTEST_F(LogicalNotBuilderTest, logical_not_build_004, TestSize.Level2) +HWTEST_F(LogicalNotBuilderTest, logical_not_build_004, TestSize.Level1) { m_outputs = {1, 2}; @@ -107,7 +107,7 @@ HWTEST_F(LogicalNotBuilderTest, logical_not_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
* @tc.type: FUNC */ -HWTEST_F(LogicalNotBuilderTest, logical_not_build_005, TestSize.Level2) +HWTEST_F(LogicalNotBuilderTest, logical_not_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -118,7 +118,7 @@ HWTEST_F(LogicalNotBuilderTest, logical_not_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(LogicalNotBuilderTest, logical_not_build_006, TestSize.Level2) +HWTEST_F(LogicalNotBuilderTest, logical_not_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -131,7 +131,7 @@ HWTEST_F(LogicalNotBuilderTest, logical_not_build_006, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(LogicalNotBuilderTest, logical_not_getprimitive_001, TestSize.Level2) +HWTEST_F(LogicalNotBuilderTest, logical_not_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -147,7 +147,7 @@ HWTEST_F(LogicalNotBuilderTest, logical_not_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(LogicalNotBuilderTest, logical_not_getprimitive_002, TestSize.Level2) +HWTEST_F(LogicalNotBuilderTest, logical_not_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/logical_or_test.cpp b/test/unittest/ops/logical_or_test.cpp index ea0a2f8..c231fdb 100644 --- a/test/unittest/ops/logical_or_test.cpp +++ b/test/unittest/ops/logical_or_test.cpp @@ -45,7 +45,7 @@ void LogicalOrBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(LogicalOrBuilderTest, logical_or_build_001, TestSize.Level0) +HWTEST_F(LogicalOrBuilderTest, logical_or_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -59,7 +59,7 @@ HWTEST_F(LogicalOrBuilderTest, logical_or_build_001, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(LogicalOrBuilderTest, logical_or_build_002, TestSize.Level0) +HWTEST_F(LogicalOrBuilderTest, logical_or_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -74,7 +74,7 @@ HWTEST_F(LogicalOrBuilderTest, logical_or_build_002, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(LogicalOrBuilderTest, logical_or_build_003, TestSize.Level0) +HWTEST_F(LogicalOrBuilderTest, logical_or_build_003, TestSize.Level1) { m_inputs = {0, 1, 2}; m_outputs = {3}; @@ -91,7 +91,7 @@ HWTEST_F(LogicalOrBuilderTest, logical_or_build_003, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(LogicalOrBuilderTest, logical_or_build_004, TestSize.Level0) +HWTEST_F(LogicalOrBuilderTest, logical_or_build_004, TestSize.Level1) { m_outputs = {2, 3}; @@ -107,7 +107,7 @@ HWTEST_F(LogicalOrBuilderTest, logical_or_build_004, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(LogicalOrBuilderTest, logical_or_build_005, TestSize.Level0) +HWTEST_F(LogicalOrBuilderTest, logical_or_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -118,7 +118,7 @@ HWTEST_F(LogicalOrBuilderTest, logical_or_build_005, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(LogicalOrBuilderTest, logical_or_build_006, TestSize.Level0) +HWTEST_F(LogicalOrBuilderTest, logical_or_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -131,7 +131,7 @@ HWTEST_F(LogicalOrBuilderTest, logical_or_build_006, TestSize.Level0) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(LogicalOrBuilderTest, logical_or_getprimitive_001, TestSize.Level0) +HWTEST_F(LogicalOrBuilderTest, logical_or_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -147,7 +147,7 @@ HWTEST_F(LogicalOrBuilderTest, logical_or_getprimitive_001, TestSize.Level0) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(LogicalOrBuilderTest, logical_or_getprimitive_002, TestSize.Level0) +HWTEST_F(LogicalOrBuilderTest, logical_or_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/lstm_test.cpp b/test/unittest/ops/lstm_test.cpp index 2e92b6c..295ca05 100644 --- a/test/unittest/ops/lstm_test.cpp +++ b/test/unittest/ops/lstm_test.cpp @@ -224,10 +224,11 @@ void LSTMBuilderTest::SetOutputTensor() * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_001, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_001, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -248,10 +249,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_002, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_002, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -273,7 +275,7 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. 
* @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_003, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_003, TestSize.Level1) { m_inputs = {0, 1, 2, 3, 4, 5, 6}; m_outputs = {7, 8, 9}; @@ -281,6 +283,7 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_003, TestSize.Level2) SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -301,13 +304,14 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_004, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_004, TestSize.Level1) { m_outputs = {6, 7, 8, 9}; m_params = {10, 11, 12, 13, 14, 15, 16, 17, 18, 19}; SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -328,7 +332,7 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_005, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -339,7 +343,7 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. 
* @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_006, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_006, TestSize.Level1) { SetInputTensor(); @@ -352,10 +356,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_006, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid bidirectional's dataType. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_007, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_007, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + std::shared_ptr bidirectionalTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); int64_t* bidirectionalValue = new (std::nothrow) int64_t [1]{0}; @@ -382,10 +387,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_007, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid has_bias's dataType. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_008, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_008, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); std::shared_ptr hasBiasTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); @@ -412,10 +418,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_008, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid input_size's dataType. 
* @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_009, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_009, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); std::shared_ptr inputSizeTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, @@ -442,10 +449,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_009, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid hidden_size's dataType. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_010, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_010, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -472,10 +480,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_010, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid num_layers's dataType. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_011, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_011, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -502,10 +511,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_011, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid num_directions's dataType. 
* @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_012, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_012, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -532,10 +542,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_012, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid dropout's dataType. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_013, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_013, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -562,10 +573,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_013, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid zoneout_cell's dataType. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_014, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_014, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -592,10 +604,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_014, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid zoneout_hidden's dataType. 
* @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_015, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_015, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -622,10 +635,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_015, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid proj_size's dataType. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_016, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_016, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -652,10 +666,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_016, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid bidirectional param. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_017, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_017, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -676,10 +691,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_017, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid has_bias param. 
* @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_018, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_018, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -700,10 +716,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_018, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid input_size param. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_019, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_019, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); @@ -724,10 +741,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_019, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid hidden_size param. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_020, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_020, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -748,10 +766,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_020, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid num_layers param. 
* @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_021, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_021, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -772,10 +791,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_021, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid num_directions param. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_022, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_022, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -796,10 +816,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_022, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid dropout param. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_023, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_023, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -820,10 +841,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_023, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid zoneout_cell param. 
* @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_024, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_024, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -844,10 +866,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_024, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid zoneout_cell param. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_025, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_025, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -868,10 +891,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_025, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid zoneout_hidden param. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_026, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_026, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -892,10 +916,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_026, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid proj_size param. 
* @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_027, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_027, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -916,10 +941,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_027, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for bidirectional. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_028, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_028, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + std::shared_ptr bidirectionalTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); m_allTensors.emplace_back(bidirectionalTensor); @@ -942,10 +968,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_028, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for has_bias. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_029, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_029, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); std::shared_ptr hasBiasTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); @@ -968,10 +995,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_029, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for input_size. 
* @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_030, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_030, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); std::shared_ptr inputSizeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, @@ -994,10 +1022,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_030, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for hidden_size. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_031, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_031, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -1021,10 +1050,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_031, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for num_layers. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_032, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_032, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -1048,10 +1078,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_032, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for num_directions. 
* @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_033, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_033, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -1075,10 +1106,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_033, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for dropout. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_034, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_034, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -1101,10 +1133,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_034, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for zoneout_cell. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_035, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_035, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -1127,10 +1160,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_035, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for zoneout_hidden. 
* @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_036, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_036, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -1153,10 +1187,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_036, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for proj_size. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_build_037, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_build_037, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -1179,10 +1214,11 @@ HWTEST_F(LSTMBuilderTest, LSTM_build_037, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_getprimitive_001, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_getprimitive_001, TestSize.Level1) { SetInputTensor(); SetOutputTensor(); + SaveBidirectional(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_BIDIRECTIONAL); SaveHasBias(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LSTM_HAS_BIAS); SaveInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LSTM_INPUT_SIZE); @@ -1237,7 +1273,7 @@ HWTEST_F(LSTMBuilderTest, LSTM_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(LSTMBuilderTest, LSTM_getprimitive_002, TestSize.Level2) +HWTEST_F(LSTMBuilderTest, LSTM_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/mod_test.cpp b/test/unittest/ops/mod_test.cpp index e927152..f8be921 100644 --- a/test/unittest/ops/mod_test.cpp +++ b/test/unittest/ops/mod_test.cpp @@ -60,7 +60,7 @@ void ModBuilderTest::SetInputTensor() * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(ModBuilderTest, mod_build_001, TestSize.Level2) +HWTEST_F(ModBuilderTest, mod_build_001, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -74,7 +74,7 @@ HWTEST_F(ModBuilderTest, mod_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(ModBuilderTest, mod_build_002, TestSize.Level2) +HWTEST_F(ModBuilderTest, mod_build_002, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -89,7 +89,7 @@ HWTEST_F(ModBuilderTest, mod_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(ModBuilderTest, mod_build_003, TestSize.Level2) +HWTEST_F(ModBuilderTest, mod_build_003, TestSize.Level1) { m_inputs = {0, 1, 2}; m_outputs = {3}; @@ -106,7 +106,7 @@ HWTEST_F(ModBuilderTest, mod_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(ModBuilderTest, mod_build_004, TestSize.Level2) +HWTEST_F(ModBuilderTest, mod_build_004, TestSize.Level1) { m_outputs = {2, 3}; @@ -122,7 +122,7 @@ HWTEST_F(ModBuilderTest, mod_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(ModBuilderTest, mod_build_005, TestSize.Level2) +HWTEST_F(ModBuilderTest, mod_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -133,7 +133,7 @@ HWTEST_F(ModBuilderTest, mod_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(ModBuilderTest, mod_build_006, TestSize.Level2) +HWTEST_F(ModBuilderTest, mod_build_006, TestSize.Level1) { SetInputTensor(); @@ -146,7 +146,7 @@ HWTEST_F(ModBuilderTest, mod_build_006, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(ModBuilderTest, mod_getprimitive_001, TestSize.Level2) +HWTEST_F(ModBuilderTest, mod_getprimitive_001, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -162,7 +162,7 @@ HWTEST_F(ModBuilderTest, mod_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(ModBuilderTest, mod_getprimitive_002, TestSize.Level2) +HWTEST_F(ModBuilderTest, mod_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/neg_test.cpp b/test/unittest/ops/neg_test.cpp index 8e34f26..e53142e 100644 --- a/test/unittest/ops/neg_test.cpp +++ b/test/unittest/ops/neg_test.cpp @@ -45,7 +45,7 @@ void NegBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(NegBuilderTest, neg_build_001, TestSize.Level2) +HWTEST_F(NegBuilderTest, neg_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -59,7 +59,7 @@ HWTEST_F(NegBuilderTest, neg_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(NegBuilderTest, neg_build_002, TestSize.Level2) +HWTEST_F(NegBuilderTest, neg_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -74,7 +74,7 @@ HWTEST_F(NegBuilderTest, neg_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(NegBuilderTest, neg_build_003, TestSize.Level2) +HWTEST_F(NegBuilderTest, neg_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -91,7 +91,7 @@ HWTEST_F(NegBuilderTest, neg_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(NegBuilderTest, neg_build_004, TestSize.Level2) +HWTEST_F(NegBuilderTest, neg_build_004, TestSize.Level1) { m_outputs = {1, 2}; @@ -107,7 +107,7 @@ HWTEST_F(NegBuilderTest, neg_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(NegBuilderTest, neg_build_005, TestSize.Level2) +HWTEST_F(NegBuilderTest, neg_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -118,7 +118,7 @@ HWTEST_F(NegBuilderTest, neg_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(NegBuilderTest, neg_build_006, TestSize.Level2) +HWTEST_F(NegBuilderTest, neg_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -131,7 +131,7 @@ HWTEST_F(NegBuilderTest, neg_build_006, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(NegBuilderTest, neg_getprimitive_001, TestSize.Level2) +HWTEST_F(NegBuilderTest, neg_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -147,7 +147,7 @@ HWTEST_F(NegBuilderTest, neg_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(NegBuilderTest, neg_getprimitive_002, TestSize.Level2) +HWTEST_F(NegBuilderTest, neg_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/not_equal_builder_test.cpp b/test/unittest/ops/not_equal_builder_test.cpp index ef117d9..1a32959 100644 --- a/test/unittest/ops/not_equal_builder_test.cpp +++ b/test/unittest/ops/not_equal_builder_test.cpp @@ -47,7 +47,7 @@ void NotEqualBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(NotEqualBuilderTest, not_equal_build_001, TestSize.Level0) +HWTEST_F(NotEqualBuilderTest, not_equal_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -61,7 +61,7 @@ HWTEST_F(NotEqualBuilderTest, not_equal_build_001, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(NotEqualBuilderTest, not_equal_build_002, TestSize.Level0) +HWTEST_F(NotEqualBuilderTest, not_equal_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -76,7 +76,7 @@ HWTEST_F(NotEqualBuilderTest, not_equal_build_002, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(NotEqualBuilderTest, not_equal_build_003, TestSize.Level0) +HWTEST_F(NotEqualBuilderTest, not_equal_build_003, TestSize.Level1) { m_inputs = {0, 1, 2, 3}; m_outputs = {4}; @@ -93,7 +93,7 @@ HWTEST_F(NotEqualBuilderTest, not_equal_build_003, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(NotEqualBuilderTest, not_equal_build_004, TestSize.Level0) +HWTEST_F(NotEqualBuilderTest, not_equal_build_004, TestSize.Level1) { std::vector m_outputs = {2, 3, 4}; @@ -109,7 +109,7 @@ HWTEST_F(NotEqualBuilderTest, not_equal_build_004, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(NotEqualBuilderTest, not_equal_build_005, TestSize.Level0) +HWTEST_F(NotEqualBuilderTest, not_equal_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -120,7 +120,7 @@ HWTEST_F(NotEqualBuilderTest, not_equal_build_005, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(NotEqualBuilderTest, not_equal_build_006, TestSize.Level0) +HWTEST_F(NotEqualBuilderTest, not_equal_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); @@ -133,7 +133,7 @@ HWTEST_F(NotEqualBuilderTest, not_equal_build_006, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with a virtual parameter. 
* @tc.type: FUNC */ -HWTEST_F(NotEqualBuilderTest, not_equal_build_007, TestSize.Level0) +HWTEST_F(NotEqualBuilderTest, not_equal_build_007, TestSize.Level1) { std::vector m_params = {3}; std::vector paramDim = {}; @@ -153,7 +153,7 @@ HWTEST_F(NotEqualBuilderTest, not_equal_build_007, TestSize.Level0) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(NotEqualBuilderTest, not_equal_getprimitive_001, TestSize.Level0) +HWTEST_F(NotEqualBuilderTest, not_equal_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -169,7 +169,7 @@ HWTEST_F(NotEqualBuilderTest, not_equal_getprimitive_001, TestSize.Level0) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. * @tc.type: FUNC */ -HWTEST_F(NotEqualBuilderTest, not_equal_getprimitive_002, TestSize.Level0) +HWTEST_F(NotEqualBuilderTest, not_equal_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/pow_builder_test.cpp b/test/unittest/ops/pow_builder_test.cpp index 893fbdf..5247a80 100644 --- a/test/unittest/ops/pow_builder_test.cpp +++ b/test/unittest/ops/pow_builder_test.cpp @@ -74,7 +74,7 @@ void PowBuilderTest::SaveScale(OH_NN_DataType dataType, * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function * @tc.type: FUNC */ -HWTEST_F(PowBuilderTest, pow_build_001, TestSize.Level0) +HWTEST_F(PowBuilderTest, pow_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); @@ -90,7 +90,7 @@ HWTEST_F(PowBuilderTest, pow_build_001, TestSize.Level0) * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function * @tc.type: 
FUNC */ -HWTEST_F(PowBuilderTest, pow_build_002, TestSize.Level0) +HWTEST_F(PowBuilderTest, pow_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); @@ -107,7 +107,7 @@ HWTEST_F(PowBuilderTest, pow_build_002, TestSize.Level0) * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function * @tc.type: FUNC */ -HWTEST_F(PowBuilderTest, pow_build_003, TestSize.Level0) +HWTEST_F(PowBuilderTest, pow_build_003, TestSize.Level1) { m_inputs = {0, 1, 2}; m_outputs = {3}; @@ -127,7 +127,7 @@ HWTEST_F(PowBuilderTest, pow_build_003, TestSize.Level0) * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function * @tc.type: FUNC */ -HWTEST_F(PowBuilderTest, pow_build_004, TestSize.Level0) +HWTEST_F(PowBuilderTest, pow_build_004, TestSize.Level1) { m_outputs = {2, 3}; m_params = {4, 5}; @@ -146,7 +146,7 @@ HWTEST_F(PowBuilderTest, pow_build_004, TestSize.Level0) * @tc.desc: Verify that the build function return a failed message with null allTensor * @tc.type: FUNC */ -HWTEST_F(PowBuilderTest, pow_build_005, TestSize.Level0) +HWTEST_F(PowBuilderTest, pow_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -157,7 +157,7 @@ HWTEST_F(PowBuilderTest, pow_build_005, TestSize.Level0) * @tc.desc: Verify that the build function return a failed message without output tensor * @tc.type: FUNC */ -HWTEST_F(PowBuilderTest, pow_build_006, TestSize.Level0) +HWTEST_F(PowBuilderTest, pow_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); @@ -170,10 +170,11 @@ HWTEST_F(PowBuilderTest, pow_build_006, TestSize.Level0) * @tc.desc: Verify that the build function returns a failed message with invalid shift's dataType. 
* @tc.type: FUNC */ -HWTEST_F(PowBuilderTest, pow_build_007, TestSize.Level2) +HWTEST_F(PowBuilderTest, pow_build_007, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + std::shared_ptr shiftTensor = TransToNNTensor(OH_NN_INT64, m_shiftDim, nullptr, OH_NN_POW_SHIFT); int64_t* shiftValue = new (std::nothrow) int64_t[1] {0}; @@ -191,10 +192,11 @@ HWTEST_F(PowBuilderTest, pow_build_007, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid scale's dataType. * @tc.type: FUNC */ -HWTEST_F(PowBuilderTest, pow_build_008, TestSize.Level2) +HWTEST_F(PowBuilderTest, pow_build_008, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveShift(OH_NN_FLOAT32, m_shiftDim, nullptr, OH_NN_POW_SHIFT); std::shared_ptr scaleTensor = TransToNNTensor(OH_NN_INT64, m_scaleDim, nullptr, OH_NN_POW_SCALE); @@ -212,7 +214,7 @@ HWTEST_F(PowBuilderTest, pow_build_008, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid shift param. * @tc.type: FUNC */ -HWTEST_F(PowBuilderTest, pow_build_009, TestSize.Level2) +HWTEST_F(PowBuilderTest, pow_build_009, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -228,7 +230,7 @@ HWTEST_F(PowBuilderTest, pow_build_009, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid scale param. 
* @tc.type: FUNC */ -HWTEST_F(PowBuilderTest, pow_build_010, TestSize.Level2) +HWTEST_F(PowBuilderTest, pow_build_010, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -244,10 +246,11 @@ HWTEST_F(PowBuilderTest, pow_build_010, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for shift. * @tc.type: FUNC */ -HWTEST_F(PowBuilderTest, pow_build_011, TestSize.Level2) +HWTEST_F(PowBuilderTest, pow_build_011, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr shiftTensor = TransToNNTensor(OH_NN_FLOAT32, m_shiftDim, nullptr, OH_NN_POW_SHIFT); m_allTensors.emplace_back(shiftTensor); @@ -262,10 +265,11 @@ HWTEST_F(PowBuilderTest, pow_build_011, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for scale. * @tc.type: FUNC */ -HWTEST_F(PowBuilderTest, pow_build_012, TestSize.Level2) +HWTEST_F(PowBuilderTest, pow_build_012, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveShift(OH_NN_FLOAT32, m_shiftDim, nullptr, OH_NN_POW_SHIFT); std::shared_ptr scaleTensor = TransToNNTensor(OH_NN_FLOAT32, m_scaleDim, nullptr, OH_NN_POW_SCALE); @@ -280,7 +284,7 @@ HWTEST_F(PowBuilderTest, pow_build_012, TestSize.Level2) * @tc.desc: Verify the GetPrimitive function return nullptr * @tc.type: FUNC */ -HWTEST_F(PowBuilderTest, pow_get_primitive_001, TestSize.Level0) +HWTEST_F(PowBuilderTest, pow_get_primitive_001, TestSize.Level1) { LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; @@ -292,10 +296,11 @@ HWTEST_F(PowBuilderTest, pow_get_primitive_001, TestSize.Level0) * @tc.desc: Verify the normal params return behavior of the 
getprimitive function * @tc.type: FUNC */ -HWTEST_F(PowBuilderTest, pow_get_primitive_002, TestSize.Level0) +HWTEST_F(PowBuilderTest, pow_get_primitive_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveShift(OH_NN_FLOAT32, m_shiftDim, nullptr, OH_NN_POW_SHIFT); SaveScale(OH_NN_FLOAT32, m_scaleDim, nullptr, OH_NN_POW_SCALE); diff --git a/test/unittest/ops/range_test.cpp b/test/unittest/ops/range_test.cpp index aabfe4a..68b535f 100644 --- a/test/unittest/ops/range_test.cpp +++ b/test/unittest/ops/range_test.cpp @@ -97,7 +97,7 @@ void RangeBuilderTest::SaveDelta(OH_NN_DataType dataType, * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_001, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -115,7 +115,7 @@ HWTEST_F(RangeBuilderTest, range_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_002, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -134,7 +134,7 @@ HWTEST_F(RangeBuilderTest, range_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_003, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -156,7 +156,7 @@ HWTEST_F(RangeBuilderTest, range_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_004, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_004, TestSize.Level1) { m_outputs = {1, 2}; m_params = {3, 4, 5, 6}; @@ -177,7 +177,7 @@ HWTEST_F(RangeBuilderTest, range_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_005, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -188,7 +188,7 @@ HWTEST_F(RangeBuilderTest, range_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_006, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -201,10 +201,11 @@ HWTEST_F(RangeBuilderTest, range_build_006, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalid dType's dataType. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_007, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_007, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr<NNTensor> dTypeTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); float* dTypeValue = new (std::nothrow) float [1]{0.0f}; @@ -225,11 +226,11 @@ * @tc.desc: Verify that the build function returns a failed message with invalid start's dataType.
* @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_008, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_008, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - + SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); std::shared_ptr<NNTensor> startTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_RANGE_START); @@ -250,7 +251,7 @@ * @tc.desc: Verify that the build function returns a failed message with invalid limit's dataType. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_009, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_009, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -275,7 +276,7 @@ * @tc.desc: Verify that the build function returns a failed message with invalid delta's dataType. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_010, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_010, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -300,7 +301,7 @@ * @tc.desc: Verify that the build function returns a failed message with passing invalid dType param. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_011, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_011, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -319,7 +320,7 @@ * @tc.desc: Verify that the build function returns a failed message with passing invalid start param.
* @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_012, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_012, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -338,7 +339,7 @@ HWTEST_F(RangeBuilderTest, range_build_012, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid limit param. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_013, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_013, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -357,7 +358,7 @@ HWTEST_F(RangeBuilderTest, range_build_013, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with passing invalid delta param. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_014, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_014, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -376,7 +377,7 @@ HWTEST_F(RangeBuilderTest, range_build_014, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for dType. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_015, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_015, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -397,7 +398,7 @@ HWTEST_F(RangeBuilderTest, range_build_015, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for start. 
* @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_016, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_016, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -418,7 +419,7 @@ HWTEST_F(RangeBuilderTest, range_build_016, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for limit. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_017, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_017, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -439,7 +440,7 @@ HWTEST_F(RangeBuilderTest, range_build_017, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for delta. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_018, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_build_018, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -460,7 +461,7 @@ HWTEST_F(RangeBuilderTest, range_build_018, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_getprimitive_001, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -493,7 +494,7 @@ HWTEST_F(RangeBuilderTest, range_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_getprimitive_002, TestSize.Level2) +HWTEST_F(RangeBuilderTest, range_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/real_div_test.cpp b/test/unittest/ops/real_div_test.cpp index be202a6..7f3321e 100644 --- a/test/unittest/ops/real_div_test.cpp +++ b/test/unittest/ops/real_div_test.cpp @@ -45,7 +45,7 @@ void RealDivBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(RealDivBuilderTest, real_div_build_001, TestSize.Level2) +HWTEST_F(RealDivBuilderTest, real_div_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -59,7 +59,7 @@ HWTEST_F(RealDivBuilderTest, real_div_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(RealDivBuilderTest, real_div_build_002, TestSize.Level2) +HWTEST_F(RealDivBuilderTest, real_div_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -74,7 +74,7 @@ HWTEST_F(RealDivBuilderTest, real_div_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(RealDivBuilderTest, real_div_build_003, TestSize.Level2) +HWTEST_F(RealDivBuilderTest, real_div_build_003, TestSize.Level1) { m_inputs = {0, 1, 2}; m_outputs = {3}; @@ -91,7 +91,7 @@ HWTEST_F(RealDivBuilderTest, real_div_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(RealDivBuilderTest, real_div_build_004, TestSize.Level2) +HWTEST_F(RealDivBuilderTest, real_div_build_004, TestSize.Level1) { m_outputs = {2, 3}; @@ -107,7 +107,7 @@ HWTEST_F(RealDivBuilderTest, real_div_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(RealDivBuilderTest, real_div_build_005, TestSize.Level2) +HWTEST_F(RealDivBuilderTest, real_div_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -118,7 +118,7 @@ HWTEST_F(RealDivBuilderTest, real_div_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(RealDivBuilderTest, real_div_build_006, TestSize.Level2) +HWTEST_F(RealDivBuilderTest, real_div_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -131,7 +131,7 @@ HWTEST_F(RealDivBuilderTest, real_div_build_006, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(RealDivBuilderTest, real_div_getprimitive_001, TestSize.Level2) +HWTEST_F(RealDivBuilderTest, real_div_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -147,7 +147,7 @@ HWTEST_F(RealDivBuilderTest, real_div_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(RealDivBuilderTest, real_div_getprimitive_002, TestSize.Level2) +HWTEST_F(RealDivBuilderTest, real_div_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/reciprocal_test.cpp b/test/unittest/ops/reciprocal_test.cpp index 421284d..33d5de4 100644 --- a/test/unittest/ops/reciprocal_test.cpp +++ b/test/unittest/ops/reciprocal_test.cpp @@ -45,7 +45,7 @@ void ReciprocalBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(ReciprocalBuilderTest, reciprocal_build_001, TestSize.Level2) +HWTEST_F(ReciprocalBuilderTest, reciprocal_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -59,7 +59,7 @@ HWTEST_F(ReciprocalBuilderTest, reciprocal_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(ReciprocalBuilderTest, reciprocal_build_002, TestSize.Level2) +HWTEST_F(ReciprocalBuilderTest, reciprocal_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -74,7 +74,7 @@ HWTEST_F(ReciprocalBuilderTest, reciprocal_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(ReciprocalBuilderTest, reciprocal_build_003, TestSize.Level2) +HWTEST_F(ReciprocalBuilderTest, reciprocal_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -91,7 +91,7 @@ HWTEST_F(ReciprocalBuilderTest, reciprocal_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(ReciprocalBuilderTest, reciprocal_build_004, TestSize.Level2) +HWTEST_F(ReciprocalBuilderTest, reciprocal_build_004, TestSize.Level1) { m_outputs = {1, 2}; @@ -107,7 +107,7 @@ HWTEST_F(ReciprocalBuilderTest, reciprocal_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(ReciprocalBuilderTest, reciprocal_build_005, TestSize.Level2) +HWTEST_F(ReciprocalBuilderTest, reciprocal_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -118,7 +118,7 @@ HWTEST_F(ReciprocalBuilderTest, reciprocal_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(ReciprocalBuilderTest, reciprocal_build_006, TestSize.Level2) +HWTEST_F(ReciprocalBuilderTest, reciprocal_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -131,7 +131,7 @@ HWTEST_F(ReciprocalBuilderTest, reciprocal_build_006, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(ReciprocalBuilderTest, reciprocal_getprimitive_001, TestSize.Level2) +HWTEST_F(ReciprocalBuilderTest, reciprocal_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -147,7 +147,7 @@ HWTEST_F(ReciprocalBuilderTest, reciprocal_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(ReciprocalBuilderTest, reciprocal_getprimitive_002, TestSize.Level2) +HWTEST_F(ReciprocalBuilderTest, reciprocal_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/select_test.cpp b/test/unittest/ops/select_test.cpp index 27133d3..f8e30dd 100644 --- a/test/unittest/ops/select_test.cpp +++ b/test/unittest/ops/select_test.cpp @@ -45,7 +45,7 @@ void SelectBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(SelectBuilderTest, select_build_001, TestSize.Level2) +HWTEST_F(SelectBuilderTest, select_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -59,7 +59,7 @@ HWTEST_F(SelectBuilderTest, select_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(SelectBuilderTest, select_build_002, TestSize.Level2) +HWTEST_F(SelectBuilderTest, select_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -74,7 +74,7 @@ HWTEST_F(SelectBuilderTest, select_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(SelectBuilderTest, select_build_003, TestSize.Level2) +HWTEST_F(SelectBuilderTest, select_build_003, TestSize.Level1) { m_inputs = {0, 1, 2, 3}; m_outputs = {4}; @@ -91,7 +91,7 @@ HWTEST_F(SelectBuilderTest, select_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(SelectBuilderTest, select_build_004, TestSize.Level2) +HWTEST_F(SelectBuilderTest, select_build_004, TestSize.Level1) { m_outputs = {3, 4}; @@ -107,7 +107,7 @@ HWTEST_F(SelectBuilderTest, select_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(SelectBuilderTest, select_build_005, TestSize.Level2) +HWTEST_F(SelectBuilderTest, select_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -118,7 +118,7 @@ HWTEST_F(SelectBuilderTest, select_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(SelectBuilderTest, select_build_006, TestSize.Level2) +HWTEST_F(SelectBuilderTest, select_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -131,7 +131,7 @@ HWTEST_F(SelectBuilderTest, select_build_006, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(SelectBuilderTest, select_getprimitive_001, TestSize.Level2) +HWTEST_F(SelectBuilderTest, select_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -147,7 +147,7 @@ HWTEST_F(SelectBuilderTest, select_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(SelectBuilderTest, select_getprimitive_002, TestSize.Level2) +HWTEST_F(SelectBuilderTest, select_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/sin_test.cpp b/test/unittest/ops/sin_test.cpp index 3d50a70..b1572f6 100644 --- a/test/unittest/ops/sin_test.cpp +++ b/test/unittest/ops/sin_test.cpp @@ -45,7 +45,7 @@ void SinBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(SinBuilderTest, sin_build_001, TestSize.Level2) +HWTEST_F(SinBuilderTest, sin_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -59,7 +59,7 @@ HWTEST_F(SinBuilderTest, sin_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(SinBuilderTest, sin_build_002, TestSize.Level2) +HWTEST_F(SinBuilderTest, sin_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -74,7 +74,7 @@ HWTEST_F(SinBuilderTest, sin_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(SinBuilderTest, sin_build_003, TestSize.Level2) +HWTEST_F(SinBuilderTest, sin_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -91,7 +91,7 @@ HWTEST_F(SinBuilderTest, sin_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(SinBuilderTest, sin_build_004, TestSize.Level2) +HWTEST_F(SinBuilderTest, sin_build_004, TestSize.Level1) { m_outputs = {1, 2}; @@ -107,7 +107,7 @@ HWTEST_F(SinBuilderTest, sin_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(SinBuilderTest, sin_build_005, TestSize.Level2) +HWTEST_F(SinBuilderTest, sin_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -118,7 +118,7 @@ HWTEST_F(SinBuilderTest, sin_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(SinBuilderTest, sin_build_006, TestSize.Level2) +HWTEST_F(SinBuilderTest, sin_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -131,7 +131,7 @@ HWTEST_F(SinBuilderTest, sin_build_006, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(SinBuilderTest, sin_getprimitive_001, TestSize.Level2) +HWTEST_F(SinBuilderTest, sin_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -147,7 +147,7 @@ HWTEST_F(SinBuilderTest, sin_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(SinBuilderTest, sin_getprimitive_002, TestSize.Level2) +HWTEST_F(SinBuilderTest, sin_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/sparse_to_dense_test.cpp b/test/unittest/ops/sparse_to_dense_test.cpp index add8e5c..037b1bd 100644 --- a/test/unittest/ops/sparse_to_dense_test.cpp +++ b/test/unittest/ops/sparse_to_dense_test.cpp @@ -69,7 +69,7 @@ void SparseToDenseBuilderTest::SetInputTensor() * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_001, TestSize.Level2) +HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_001, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -83,7 +83,7 @@ HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_002, TestSize.Level2) +HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_002, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -98,7 +98,7 @@ HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_003, TestSize.Level2) +HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_003, TestSize.Level1) { m_inputs = {0, 1, 2, 3}; m_outputs = {4}; @@ -115,7 +115,7 @@ HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_004, TestSize.Level2) +HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_004, TestSize.Level1) { m_outputs = {3, 4}; @@ -131,7 +131,7 @@ HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_005, TestSize.Level2) +HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -142,7 +142,7 @@ HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_006, TestSize.Level2) +HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_006, TestSize.Level1) { SetInputTensor(); @@ -155,7 +155,7 @@ HWTEST_F(SparseToDenseBuilderTest, SparseToDense_build_006, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(SparseToDenseBuilderTest, SparseToDense_getprimitive_001, TestSize.Level2) +HWTEST_F(SparseToDenseBuilderTest, SparseToDense_getprimitive_001, TestSize.Level1) { SetInputTensor(); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -171,7 +171,7 @@ HWTEST_F(SparseToDenseBuilderTest, SparseToDense_getprimitive_001, TestSize.Leve * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(SparseToDenseBuilderTest, SparseToDense_getprimitive_002, TestSize.Level2) +HWTEST_F(SparseToDenseBuilderTest, SparseToDense_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/square_test.cpp b/test/unittest/ops/square_test.cpp index 697e6f6..a561663 100644 --- a/test/unittest/ops/square_test.cpp +++ b/test/unittest/ops/square_test.cpp @@ -45,7 +45,7 @@ void SquareBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(SquareBuilderTest, square_build_001, TestSize.Level2) +HWTEST_F(SquareBuilderTest, square_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -59,7 +59,7 @@ HWTEST_F(SquareBuilderTest, square_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(SquareBuilderTest, square_build_002, TestSize.Level2) +HWTEST_F(SquareBuilderTest, square_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -74,7 +74,7 @@ HWTEST_F(SquareBuilderTest, square_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(SquareBuilderTest, square_build_003, TestSize.Level2) +HWTEST_F(SquareBuilderTest, square_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -91,7 +91,7 @@ HWTEST_F(SquareBuilderTest, square_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. 
* @tc.type: FUNC */ -HWTEST_F(SquareBuilderTest, square_build_004, TestSize.Level2) +HWTEST_F(SquareBuilderTest, square_build_004, TestSize.Level1) { m_outputs = {1, 2}; @@ -107,7 +107,7 @@ HWTEST_F(SquareBuilderTest, square_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(SquareBuilderTest, square_build_005, TestSize.Level2) +HWTEST_F(SquareBuilderTest, square_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -118,7 +118,7 @@ HWTEST_F(SquareBuilderTest, square_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(SquareBuilderTest, square_build_006, TestSize.Level2) +HWTEST_F(SquareBuilderTest, square_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -131,7 +131,7 @@ HWTEST_F(SquareBuilderTest, square_build_006, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(SquareBuilderTest, square_getprimitive_001, TestSize.Level2) +HWTEST_F(SquareBuilderTest, square_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -147,7 +147,7 @@ HWTEST_F(SquareBuilderTest, square_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(SquareBuilderTest, square_getprimitive_002, TestSize.Level2) +HWTEST_F(SquareBuilderTest, square_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/unstack_test.cpp b/test/unittest/ops/unstack_test.cpp index 6675e57..5076a5a 100644 --- a/test/unittest/ops/unstack_test.cpp +++ b/test/unittest/ops/unstack_test.cpp @@ -61,7 +61,7 @@ void UnstackBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(UnstackBuilderTest, unstack_build_001, TestSize.Level2) +HWTEST_F(UnstackBuilderTest, unstack_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -76,7 +76,7 @@ HWTEST_F(UnstackBuilderTest, unstack_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(UnstackBuilderTest, unstack_build_002, TestSize.Level2) +HWTEST_F(UnstackBuilderTest, unstack_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -92,7 +92,7 @@ HWTEST_F(UnstackBuilderTest, unstack_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(UnstackBuilderTest, unstack_build_003, TestSize.Level2) +HWTEST_F(UnstackBuilderTest, unstack_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -111,7 +111,7 @@ HWTEST_F(UnstackBuilderTest, unstack_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
* @tc.type: FUNC */ -HWTEST_F(UnstackBuilderTest, unstack_build_004, TestSize.Level2) +HWTEST_F(UnstackBuilderTest, unstack_build_004, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -122,7 +122,7 @@ * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(UnstackBuilderTest, unstack_build_005, TestSize.Level2) +HWTEST_F(UnstackBuilderTest, unstack_build_005, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -135,10 +135,11 @@ * @tc.desc: Verify that the build function returns a failed message with invalid axis's dataType. * @tc.type: FUNC */ -HWTEST_F(UnstackBuilderTest, unstack_build_006, TestSize.Level2) +HWTEST_F(UnstackBuilderTest, unstack_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr<NNTensor> axisTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_UNSTACK_AXIS); float* axisValue = new (std::nothrow) float[1]{1.0f}; @@ -155,7 +156,7 @@ * @tc.desc: Verify that the build function returns a failed message with passing invalid axis param. * @tc.type: FUNC */ -HWTEST_F(UnstackBuilderTest, unstack_build_007, TestSize.Level2) +HWTEST_F(UnstackBuilderTest, unstack_build_007, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -170,10 +171,11 @@ HWTEST_F(UnstackBuilderTest, unstack_build_007, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without set buffer for axis.
* @tc.type: FUNC */ -HWTEST_F(UnstackBuilderTest, unstack_build_008, TestSize.Level2) +HWTEST_F(UnstackBuilderTest, unstack_build_008, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_UNSTACK_AXIS); m_allTensors.emplace_back(axisTensor); @@ -187,7 +189,7 @@ HWTEST_F(UnstackBuilderTest, unstack_build_008, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(UnstackBuilderTest, unstack_getprimitive_001, TestSize.Level2) +HWTEST_F(UnstackBuilderTest, unstack_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -208,7 +210,7 @@ HWTEST_F(UnstackBuilderTest, unstack_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. * @tc.type: FUNC */ -HWTEST_F(UnstackBuilderTest, unstack_getprimitive_002, TestSize.Level2) +HWTEST_F(UnstackBuilderTest, unstack_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/where_test.cpp b/test/unittest/ops/where_test.cpp index a3a1860..5c9dbf3 100644 --- a/test/unittest/ops/where_test.cpp +++ b/test/unittest/ops/where_test.cpp @@ -45,7 +45,7 @@ void WhereBuilderTest::TearDown() {} * @tc.desc: Verify that the build function returns a successful message. 
* @tc.type: FUNC */ -HWTEST_F(WhereBuilderTest, where_build_001, TestSize.Level2) +HWTEST_F(WhereBuilderTest, where_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -59,7 +59,7 @@ HWTEST_F(WhereBuilderTest, where_build_001, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(WhereBuilderTest, where_build_002, TestSize.Level2) +HWTEST_F(WhereBuilderTest, where_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -74,7 +74,7 @@ HWTEST_F(WhereBuilderTest, where_build_002, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided input. * @tc.type: FUNC */ -HWTEST_F(WhereBuilderTest, where_build_003, TestSize.Level2) +HWTEST_F(WhereBuilderTest, where_build_003, TestSize.Level1) { m_inputs = {0, 1, 2, 3}; m_outputs = {4}; @@ -91,7 +91,7 @@ HWTEST_F(WhereBuilderTest, where_build_003, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with invalided output. * @tc.type: FUNC */ -HWTEST_F(WhereBuilderTest, where_build_004, TestSize.Level2) +HWTEST_F(WhereBuilderTest, where_build_004, TestSize.Level1) { m_outputs = {3, 4}; @@ -107,7 +107,7 @@ HWTEST_F(WhereBuilderTest, where_build_004, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
* @tc.type: FUNC */ -HWTEST_F(WhereBuilderTest, where_build_005, TestSize.Level2) +HWTEST_F(WhereBuilderTest, where_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -118,7 +118,7 @@ HWTEST_F(WhereBuilderTest, where_build_005, TestSize.Level2) * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(WhereBuilderTest, where_build_006, TestSize.Level2) +HWTEST_F(WhereBuilderTest, where_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -131,7 +131,7 @@ HWTEST_F(WhereBuilderTest, where_build_006, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a successful message * @tc.type: FUNC */ -HWTEST_F(WhereBuilderTest, where_getprimitive_001, TestSize.Level2) +HWTEST_F(WhereBuilderTest, where_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -147,7 +147,7 @@ HWTEST_F(WhereBuilderTest, where_getprimitive_001, TestSize.Level2) * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
* @tc.type: FUNC */ -HWTEST_F(WhereBuilderTest, where_getprimitive_002, TestSize.Level2) +HWTEST_F(WhereBuilderTest, where_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); -- Gitee From 9783addb34e7f1000f1d1f9460fcc4f64b112ca2 Mon Sep 17 00:00:00 2001 From: wWX1227061 Date: Wed, 28 Feb 2024 16:42:20 +0800 Subject: [PATCH 7/9] fix abs,leakyRelu ops,delete realDiv ops Signed-off-by: wWX1227061 --- .../native/neural_network_runtime/BUILD.gn | 1 - .../lite_graph_to_hdi_model_v2_1.cpp | 58 ------- .../ops/abs_builder.cpp | 9 +- .../ops/leaky_relu_builder.cpp | 9 +- .../neural_network_runtime_type.h | 71 ++++---- test/unittest/BUILD.gn | 4 +- test/unittest/ops/real_div_test.cpp | 158 ------------------ 7 files changed, 47 insertions(+), 263 deletions(-) delete mode 100644 test/unittest/ops/real_div_test.cpp diff --git a/frameworks/native/neural_network_runtime/BUILD.gn b/frameworks/native/neural_network_runtime/BUILD.gn index af8948b..1b0cd3e 100644 --- a/frameworks/native/neural_network_runtime/BUILD.gn +++ b/frameworks/native/neural_network_runtime/BUILD.gn @@ -109,7 +109,6 @@ ops_sources = [ "ops/prelu_builder.cpp", "ops/quant_dtype_cast_builder.cpp", "ops/range_builder.cpp", - "ops/real_div_builder.cpp", "ops/reciprocal_builder.cpp", "ops/reduceall_builder.cpp", "ops/reducemean_builder.cpp", diff --git a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp index efc04c0..a4f21b6 100644 --- a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp +++ b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp @@ -29,22 +29,6 @@ typedef void *TensorPtr; namespace OHOS { namespace NeuralNetworkRuntime { namespace NNRt_V2_1 { -std::vector ConvertAbs(PrimitivePtr primitive) -{ - if (primitive == nullptr) { - 
LOGE("ConvertAbs v2_1 failed, primitive is nullptr."); - return {}; - } - - Abs abs{}; - - OHOS::MessageParcel data; - (void)AbsBlockMarshalling(data, abs); - std::vector ret(reinterpret_cast(data.GetData()), - reinterpret_cast(data.GetData()) + data.GetDataSize()); - return ret; -} - std::vector ConvertActivation(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -604,23 +588,6 @@ std::vector ConvertLayerNormFusion(PrimitivePtr primitive) return ret; } -std::vector ConvertLeakyRelu(PrimitivePtr primitive) -{ - if (primitive == nullptr) { - LOGE("ConvertLeakyRelu v2_1 failed, primitive is nullptr."); - return {}; - } - - LeakyRelu leakyRelu{}; - leakyRelu.negative_slope = mindspore::lite::MindIR_LeakyRelu_GetNegativeSlope(primitive); - - OHOS::MessageParcel data; - (void)LeakyReluBlockMarshalling(data, leakyRelu); - std::vector ret(reinterpret_cast(data.GetData()), - reinterpret_cast(data.GetData()) + data.GetDataSize()); - return ret; -} - std::vector ConvertLess(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -986,22 +953,6 @@ std::vector ConvertReciprocal(PrimitivePtr primitive) return ret; } -std::vector ConvertRealDiv(PrimitivePtr primitive) -{ - if (primitive == nullptr) { - LOGE("ConvertRealDiv v2_1 failed, primitive is nullptr."); - return {}; - } - - RealDiv realDiv{}; - - OHOS::MessageParcel data; - (void)RealDivBlockMarshalling(data, realDiv); - std::vector ret(reinterpret_cast(data.GetData()), - reinterpret_cast(data.GetData()) + data.GetDataSize()); - return ret; -} - std::vector ConvertReduceFusion(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -1456,9 +1407,6 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ACTIVATION: return ConvertActivation(primitive); break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ABS: - return ConvertAbs(primitive); - break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ADD_FUSION: return ConvertAddFusion(primitive); break; @@ -1549,9 +1497,6 @@ 
std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LAYER_NORM_FUSION: return ConvertLayerNormFusion(primitive); break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LEAKY_RELU: - return ConvertLeakyRelu(primitive); - break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LESS: return ConvertLess(primitive); break; @@ -1615,9 +1560,6 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RECIPROCAL: return ConvertReciprocal(primitive); break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_REAL_DIV: - return ConvertRealDiv(primitive); - break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_REDUCE_FUSION: return ConvertReduceFusion(primitive); break; diff --git a/frameworks/native/neural_network_runtime/ops/abs_builder.cpp b/frameworks/native/neural_network_runtime/ops/abs_builder.cpp index 182e1b5..febc840 100755 --- a/frameworks/native/neural_network_runtime/ops/abs_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/abs_builder.cpp @@ -62,7 +62,14 @@ LiteGraphPrimitvePtr AbsBuilder::GetPrimitive() return {nullptr, DestroyLiteGraphPrimitive}; } - void* primitive = mindspore::lite::MindIR_Abs_CreatePrimitive(); + float alpha{0.0f}; + float minVal{0.0f}; + float maxVal{0.0f}; + bool approximate{false}; + mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_ABS}; + + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, + minVal, maxVal, approximate); LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; return graphPrimitivePtr; } diff --git a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp index 4d6102f..6778140 100644 --- a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp @@ -99,7 +99,14 @@ 
LiteGraphPrimitvePtr LeakyReluBuilder::GetPrimitive() return {nullptr, DestroyLiteGraphPrimitive}; } - void* primitive = mindspore::lite::MindIR_LeakyRelu_CreatePrimitive(m_negativeSlope); + float alpha {m_negativeSlope}; + float minVal {0.0f}; + float maxVal {0.0f}; + bool approximate {false}; + mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_RELU}; + + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, + minVal, maxVal, approximate); LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; return graphPrimitivePtr; } diff --git a/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h b/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h index fa77804..a60ead7 100644 --- a/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h +++ b/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h @@ -1710,19 +1710,6 @@ typedef enum { */ OH_NN_OPS_EXP = 60, - /** - * Return input1 / input2 element-wise for real types. If input1 and input2 are reals, - * this will return floating-point division. - * - * Inputs: - * * input1: n-dimensional tensor. - * * input2: n-dimensional tensor. Has the same data type as x. - * - * Outputs: - * * output: A tensor, has the same data type as x. - */ - OH_NN_OPS_REAL_DIV = 61, - /** * Returns the tensor resulted from performing the less logical operation elementwise\n * on the input tensors input1 and input2. @@ -1736,7 +1723,7 @@ typedef enum { * Outputs: * * output: A tensor, the shape is the same as the one after broadcasting, and the data type is bool. */ - OH_NN_OPS_LESS = 62, + OH_NN_OPS_LESS = 61, /** * Selects elements from input1 or input2, depending on condition. @@ -1758,7 +1745,7 @@ typedef enum { * Outputs: * * output: A tensor, has the same shape as the input_cond. */ - OH_NN_OPS_SELECT = 63, + OH_NN_OPS_SELECT = 62, /** * Calculates the square of a tensor. 
@@ -1769,7 +1756,7 @@ typedef enum { * Outputs: * * output: A tensor, has the same shape and dtype as the input. */ - OH_NN_OPS_SQUARE = 64, + OH_NN_OPS_SQUARE = 63, /** * Flattens the input tensor into a 2D matrix. If input tensor has shape (d_0, d_1, … d_n), @@ -1790,7 +1777,7 @@ typedef enum { * with input dimensions up to axis flattened to the outer dimension of * the output and remaining input dimensions flattened into the inner dimension of the output. */ - OH_NN_OPS_FLATTEN = 65, + OH_NN_OPS_FLATTEN = 64, /** * DepthToSpace rearranges (permutes) data from depth into blocks of spatial data. @@ -1812,7 +1799,7 @@ typedef enum { * * output: Output tensor of [N, H * blocksize, W * blocksize, C/(blocksize * blocksize)] for NHWC format * or [N, C/(blocksize * blocksize), H * blocksize, W * blocksize] for NCHW format. */ - OH_NN_OPS_DEPTH_TO_SPACE = 66, + OH_NN_OPS_DEPTH_TO_SPACE = 65, /** * Generate a tensor containing a sequence of numbers that begin at start\n @@ -1830,7 +1817,7 @@ typedef enum { * Outputs: * * output: A 1-dimensional tensor with specific data type containing generated range of values. */ - OH_NN_OPS_RANGE = 67, + OH_NN_OPS_RANGE = 66, /** * Carries out instance normalization as formula y = scale * (x - mean) / sqrt(variance + epsilon) + B, @@ -1850,7 +1837,7 @@ typedef enum { * Outputs: * * output: A tensor, has the same shape as the input. */ - OH_NN_OPS_INSTANCE_NORM = 68, + OH_NN_OPS_INSTANCE_NORM = 67, /** * Generate a tensor with given value and shape. @@ -1866,7 +1853,7 @@ typedef enum { * Outputs: * * output: A tensor, has the same shape as the input. */ - OH_NN_OPS_CONSTANT_OF_SHAPE = 69, + OH_NN_OPS_CONSTANT_OF_SHAPE = 68, /** * Broadcast a tensor for a compatiable shape. @@ -1880,7 +1867,7 @@ typedef enum { * Outputs: * * output: A tensor after broadcasted. 
*/ - OH_NN_OPS_BROADCAST_TO = 70, + OH_NN_OPS_BROADCAST_TO = 69, /** * Returns the tensor resulted from performing the equal logical operation elementwise\n @@ -1893,7 +1880,7 @@ typedef enum { * Outputs: * * output: A tensor. */ - OH_NN_OPS_EQUAL = 71, + OH_NN_OPS_EQUAL = 70, /** * Returns the tensor resulted from performing the greater logical operation elementwise\n @@ -1906,7 +1893,7 @@ typedef enum { * Outputs: * * output: A tensor. */ - OH_NN_OPS_GREATER = 72, + OH_NN_OPS_GREATER = 71, /** * Returns the tensor resulted from performing the not_equal logical operation elementwise\n @@ -1919,7 +1906,7 @@ typedef enum { * Outputs: * * output: A tensor. */ - OH_NN_OPS_NOT_EQUAL = 73, + OH_NN_OPS_NOT_EQUAL = 72, /** * Returns the tensor resulted from performing the greater_equal logical operation elementwise\n @@ -1932,7 +1919,7 @@ typedef enum { * Outputs: * * output: A tensor. */ - OH_NN_OPS_GREATER_EQUAL = 74, + OH_NN_OPS_GREATER_EQUAL = 73, /** * LeakyRelu takes input data (Tensor) and an argument alpha, and produces one output data (Tensor) @@ -1948,7 +1935,7 @@ typedef enum { * Outputs: * * output: A tensor, with the same data type and shape as the input tensor. */ - OH_NN_OPS_LEAKY_RELU = 75, + OH_NN_OPS_LEAKY_RELU = 74, /** * Computes an one-layer LSTM. This operator is usually supported via some custom implementation. @@ -1984,7 +1971,7 @@ typedef enum { * * cy: The last output tensor of the cell, * shape is [num_directions * num_layers, batch_size, hidden_size]. */ - OH_NN_OPS_LSTM = 76, + OH_NN_OPS_LSTM = 75, /** * Returns a tensor of the same type and shape as input tensor with its value clipped to min and max. @@ -2000,7 +1987,7 @@ typedef enum { * Outputs: * * output: n-dimensional tensor., with the same data type and shape as the input tensor. */ - OH_NN_OPS_CLIP = 77, + OH_NN_OPS_CLIP = 76, /** * Determine whether all emements in a given tensor are non-zero. 
It returns a boolean tensor @@ -2017,7 +2004,7 @@ typedef enum { * Outputs: * * output: Indices or values before the maximum input tensor on the axis. */ - OH_NN_OPS_ALL = 78, + OH_NN_OPS_ALL = 77, /** * Asserts that the given condition si true. @@ -2034,7 +2021,7 @@ typedef enum { * Outputs: * * output: Tensor after average pooling. */ - OH_NN_OPS_ASSERT = 79, + OH_NN_OPS_ASSERT = 78, /** * Calculates the cosine of the given input tensor, element-wise. @@ -2045,7 +2032,7 @@ typedef enum { * Outputs: * * output: n-dimensional tensor. The cosine of the input tensor computed element-wise. */ - OH_NN_OPS_COS = 80, + OH_NN_OPS_COS = 79, /** * Calculates the result of nature logarithm of the input. @@ -2056,7 +2043,7 @@ typedef enum { * Outputs: * * output: n-dimensional tensor with the same shape as the input tensor. */ - OH_NN_OPS_LOG = 81, + OH_NN_OPS_LOG = 80, /** * Calculates the truth value of input0 and input1 element-wise. @@ -2068,7 +2055,7 @@ typedef enum { * Outputs: * * output: A tensor of type bool with the shape that x1 and x2 broadcast to. */ - OH_NN_OPS_LOGICAL_AND = 82, + OH_NN_OPS_LOGICAL_AND = 81, /** * Calculates the truth value of NOT x element-wise. @@ -2079,7 +2066,7 @@ typedef enum { * Outputs: * * output: A tensor of type bool with the shape of input. */ - OH_NN_OPS_LOGICAL_NOT = 83, + OH_NN_OPS_LOGICAL_NOT = 82, /** * Computes the remainder of dividing the first input tensor by the second input tensor element-wise. @@ -2097,7 +2084,7 @@ typedef enum { * * output: The shape is the same shape as the boradcast shape. The data type is the type with * the higher precision or the highest data type between the two inputs. */ - OH_NN_OPS_MOD = 84, + OH_NN_OPS_MOD = 83, /** * Returns a tensor with negative values of the input tensor element-wise. @@ -2108,7 +2095,7 @@ typedef enum { * Outputs: * * output: A tensor with the same shape as the input tensor. */ - OH_NN_OPS_NEG = 85, + OH_NN_OPS_NEG = 84, /** * Calculate reciprocal of a tensor element-wise. 
@@ -2119,7 +2106,7 @@ typedef enum { * Outputs: * * output: A tensor with the same shape as the input tensor. */ - OH_NN_OPS_RECIPROCAL = 86, + OH_NN_OPS_RECIPROCAL = 85, /** * Calculate sine of the input element-wise. @@ -2130,7 +2117,7 @@ typedef enum { * Outputs: * * output: A tensor with the same data type and shape as the input tensor. */ - OH_NN_OPS_SIN = 87, + OH_NN_OPS_SIN = 86, /** * Selects elements from x1 or x2 based on condition and returns a tensor. @@ -2145,7 +2132,7 @@ typedef enum { * Outputs: * * output: A tensor, has the same shape as the input_cond. */ - OH_NN_OPS_WHERE = 88, + OH_NN_OPS_WHERE = 87, /** * Converts a sparse representation into a dense tensor. @@ -2160,7 +2147,7 @@ typedef enum { * Outputs: * * output: A tensor. The data type is the same as values, and the shape is specified by sparseShape. */ - OH_NN_OPS_SPARSE_TO_DENSE = 89, + OH_NN_OPS_SPARSE_TO_DENSE = 88, /** * Calculates the truth value of input0 or input1 element-wise. @@ -2172,7 +2159,7 @@ typedef enum { * Outputs: * * output: A tensor of type bool with the shape that input0 and input1 broadcast to. */ - OH_NN_OPS_LOGICAL_OR = 90, + OH_NN_OPS_LOGICAL_OR = 89, } OH_NN_OperationType; /** diff --git a/test/unittest/BUILD.gn b/test/unittest/BUILD.gn index eed3e38..e4396c5 100644 --- a/test/unittest/BUILD.gn +++ b/test/unittest/BUILD.gn @@ -16,8 +16,8 @@ import("//build/ohos.gni") group("unittest") { testonly = true deps = [ - "components:components_unittest", - "inner_kits:inner_kits_unittest", + #"components:components_unittest", + #"inner_kits:inner_kits_unittest", "ops:ops_unittest", ] } diff --git a/test/unittest/ops/real_div_test.cpp b/test/unittest/ops/real_div_test.cpp deleted file mode 100644 index 7f3321e..0000000 --- a/test/unittest/ops/real_div_test.cpp +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright (c) 2023 Huawei Device Co., Ltd. 
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "ops/real_div_builder.h" - -#include "ops_test.h" - -using namespace testing; -using namespace testing::ext; -using namespace OHOS::NeuralNetworkRuntime::Ops; - -namespace OHOS { -namespace NeuralNetworkRuntime { -namespace UnitTest { -class RealDivBuilderTest : public OpsTest { -public: - void SetUp() override; - void TearDown() override; - -protected: - RealDivBuilder m_builder; - std::vector m_inputs {0, 1}; - std::vector m_outputs {2}; - std::vector m_dim {1, 2, 2, 1}; -}; - -void RealDivBuilderTest::SetUp() {} - -void RealDivBuilderTest::TearDown() {} - -/** - * @tc.name: real_div_build_001 - * @tc.desc: Verify that the build function returns a successful message. - * @tc.type: FUNC - */ -HWTEST_F(RealDivBuilderTest, real_div_build_001, TestSize.Level1) -{ - SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); - SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); - EXPECT_EQ(OH_NN_SUCCESS, ret); -} - -/** - * @tc.name: real_div_build_001 - * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
- * @tc.type: FUNC - */ -HWTEST_F(RealDivBuilderTest, real_div_build_002, TestSize.Level1) -{ - SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); - SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - - EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); - EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); -} - -/** - * @tc.name: real_div_build_003 - * @tc.desc: Verify that the build function returns a failed message with invalided input. - * @tc.type: FUNC - */ -HWTEST_F(RealDivBuilderTest, real_div_build_003, TestSize.Level1) -{ - m_inputs = {0, 1, 2}; - m_outputs = {3}; - - SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); - SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); - EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); -} - -/** - * @tc.name: real_div_build_004 - * @tc.desc: Verify that the build function returns a failed message with invalided output. - * @tc.type: FUNC - */ -HWTEST_F(RealDivBuilderTest, real_div_build_004, TestSize.Level1) -{ - m_outputs = {2, 3}; - - SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); - SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); - EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); -} - -/** - * @tc.name: real_div_build_005 - * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
- * @tc.type: FUNC - */ -HWTEST_F(RealDivBuilderTest, real_div_build_005, TestSize.Level1) -{ - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); - EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); -} - -/** - * @tc.name: real_div_build_006 - * @tc.desc: Verify that the build function returns a failed message without output tensor. - * @tc.type: FUNC - */ -HWTEST_F(RealDivBuilderTest, real_div_build_006, TestSize.Level1) -{ - SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); - - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); - EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); -} - -/** - * @tc.name: real_div_getprimitive_001 - * @tc.desc: Verify that the getPrimitive function returns a successful message - * @tc.type: FUNC - */ -HWTEST_F(RealDivBuilderTest, real_div_getprimitive_001, TestSize.Level1) -{ - SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); - SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - - EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); - LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); - LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); - EXPECT_NE(expectPrimitive, primitive); -} - -/** - * @tc.name: real_div_getprimitive_002 - * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
- * @tc.type: FUNC - */ -HWTEST_F(RealDivBuilderTest, real_div_getprimitive_002, TestSize.Level1) -{ - LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); - LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); - EXPECT_EQ(expectPrimitive, primitive); -} -} -} -} \ No newline at end of file -- Gitee From e161b064ebf5b5eee23443c05fe21fa0ae74dab4 Mon Sep 17 00:00:00 2001 From: wWX1227061 Date: Wed, 28 Feb 2024 16:46:52 +0800 Subject: [PATCH 8/9] delete realdiv builder Signed-off-by: wWX1227061 --- .../ops/real_div_builder.cpp | 73 ------------------- .../ops/real_div_builder.h | 42 ----------- test/unittest/BUILD.gn | 4 +- 3 files changed, 2 insertions(+), 117 deletions(-) delete mode 100755 frameworks/native/neural_network_runtime/ops/real_div_builder.cpp delete mode 100755 frameworks/native/neural_network_runtime/ops/real_div_builder.h diff --git a/frameworks/native/neural_network_runtime/ops/real_div_builder.cpp b/frameworks/native/neural_network_runtime/ops/real_div_builder.cpp deleted file mode 100755 index e595844..0000000 --- a/frameworks/native/neural_network_runtime/ops/real_div_builder.cpp +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2023 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "real_div_builder.h" - -namespace OHOS { -namespace NeuralNetworkRuntime { -namespace Ops { -static const int INPUT_NUM = 2; -static const int OUTPUT_NUM = 1; -static const std::string OP_NAME = "RealDiv"; - -RealDivBuilder::RealDivBuilder() {} - -RealDivBuilder::~RealDivBuilder() {} - -OH_NN_ReturnCode RealDivBuilder::Build(const std::vector& paramsIndex, - const std::vector& inputsIndex, - const std::vector& outputsIndex, - const std::vector>& allTensors) -{ - if (m_isBuild) { - LOGE("[RealDiv] Build failed, the realDiv operation has been build. cannot build again."); - return OH_NN_OPERATION_FORBIDDEN; - } - - auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); - if (ret != OH_NN_SUCCESS) { - LOGE("[RealDiv] Build failed, passed invalid input or output index."); - return ret; - } - - if (!paramsIndex.empty()) { - LOGW("[RealDiv] Build failed, the realDiv expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } - - m_inputsIndex = inputsIndex; - m_outputsIndex = outputsIndex; - - m_name = OP_NAME; - m_isBuild = true; - return OH_NN_SUCCESS; -} - -LiteGraphPrimitvePtr RealDivBuilder::GetPrimitive() -{ - if (!m_isBuild) { - LOGE("[RealDiv] GetPrimitive failed, cannot get primitive before call build."); - return {nullptr, DestroyLiteGraphPrimitive}; - } - - void* primitive = mindspore::lite::MindIR_RealDiv_CreatePrimitive(); - LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; - return graphPrimitivePtr; -} - -REGISTER_OPS(RealDivBuilder, OH_NN_OPS_REAL_DIV); -} // namespace Ops -} // namespace NeuralNetworkRuntime -} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/real_div_builder.h b/frameworks/native/neural_network_runtime/ops/real_div_builder.h deleted file mode 100755 index 30eb05b..0000000 --- a/frameworks/native/neural_network_runtime/ops/real_div_builder.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 
2023 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef NEURAL_NETWORK_RUNTIME_REAL_DIV_BUILDER_H -#define NEURAL_NETWORK_RUNTIME_REAL_DIV_BUILDER_H - -#include "mindir.h" - -#include "ops_builder.h" -#include "ops_registry.h" - -namespace OHOS { -namespace NeuralNetworkRuntime { -namespace Ops { -class RealDivBuilder : public OpsBuilder { -public: - RealDivBuilder(); - ~RealDivBuilder() override; - OH_NN_ReturnCode Build(const std::vector& paramsIndex, - const std::vector& inputsIndex, - const std::vector& outputsIndex, - const std::vector>& allTensors) override; - - LiteGraphPrimitvePtr GetPrimitive() override; -}; -} // namespace Ops -} // namespace NeuralNetworkRuntime -} // namespace OHOS - -#endif // NEURAL_NETWORK_RUNTIME_REAL_DIV_BUILDER_H \ No newline at end of file diff --git a/test/unittest/BUILD.gn b/test/unittest/BUILD.gn index e4396c5..eed3e38 100644 --- a/test/unittest/BUILD.gn +++ b/test/unittest/BUILD.gn @@ -16,8 +16,8 @@ import("//build/ohos.gni") group("unittest") { testonly = true deps = [ - #"components:components_unittest", - #"inner_kits:inner_kits_unittest", + "components:components_unittest", + "inner_kits:inner_kits_unittest", "ops:ops_unittest", ] } -- Gitee From e5d6a37d4038d0a45b720a931a709191d3abe895 Mon Sep 17 00:00:00 2001 From: wWX1227061 Date: Thu, 29 Feb 2024 10:51:07 +0800 Subject: [PATCH 9/9] fix bug Signed-off-by: wWX1227061 --- .../ops/abs_builder.cpp | 10 ++--- 
.../ops/leaky_relu_builder.cpp | 2 +- test/unittest/ops/BUILD.gn | 1 - test/unittest/ops/leaky_relu_test.cpp | 44 +++++++++---------- 4 files changed, 28 insertions(+), 29 deletions(-) diff --git a/frameworks/native/neural_network_runtime/ops/abs_builder.cpp b/frameworks/native/neural_network_runtime/ops/abs_builder.cpp index febc840..9d0b11a 100755 --- a/frameworks/native/neural_network_runtime/ops/abs_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/abs_builder.cpp @@ -62,11 +62,11 @@ LiteGraphPrimitvePtr AbsBuilder::GetPrimitive() return {nullptr, DestroyLiteGraphPrimitive}; } - float alpha{0.0f}; - float minVal{0.0f}; - float maxVal{0.0f}; - bool approximate{false}; - mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_ABS}; + float alpha {0.0f}; + float minVal {0.0f}; + float maxVal {0.0f}; + bool approximate {false}; + mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_ABS}; void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, minVal, maxVal, approximate); diff --git a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp index 6778140..eeab513 100644 --- a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp @@ -103,7 +103,7 @@ LiteGraphPrimitvePtr LeakyReluBuilder::GetPrimitive() float minVal {0.0f}; float maxVal {0.0f}; bool approximate {false}; - mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_RELU}; + mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_LEAKY_RELU}; void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, minVal, maxVal, approximate); diff --git a/test/unittest/ops/BUILD.gn b/test/unittest/ops/BUILD.gn index 4a4fa2c..44eb8c9 100644 --- a/test/unittest/ops/BUILD.gn 
+++ b/test/unittest/ops/BUILD.gn @@ -94,7 +94,6 @@ ohos_unittest("OpsUnittest") { sources += [ "./prelu_builder_test.cpp" ] sources += [ "./quant_dtype_cast_builder_test.cpp" ] sources += [ "./range_test.cpp" ] - sources += [ "./real_div_test.cpp" ] sources += [ "./reciprocal_test.cpp" ] sources += [ "./reduce_all_builder_test.cpp" ] sources += [ "./reduce_mean_builder_test.cpp" ] diff --git a/test/unittest/ops/leaky_relu_test.cpp b/test/unittest/ops/leaky_relu_test.cpp index b6fab00..6647375 100644 --- a/test/unittest/ops/leaky_relu_test.cpp +++ b/test/unittest/ops/leaky_relu_test.cpp @@ -57,11 +57,11 @@ void LeakyReluBuilderTest::SaveNegativeSlope(OH_NN_DataType dataType, } /** - * @tc.name: reaky_relu_build_001 + * @tc.name: leaky_relu_build_001 * @tc.desc: Verify that the build function returns a successful message. * @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_001, TestSize.Level1) +HWTEST_F(LeakyReluBuilderTest, leaky_relu_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -72,11 +72,11 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_001, TestSize.Level1) } /** - * @tc.name: reaky_relu_build_002 + * @tc.name: leaky_relu_build_002 * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. * @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_002, TestSize.Level1) +HWTEST_F(LeakyReluBuilderTest, leaky_relu_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -88,11 +88,11 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_002, TestSize.Level1) } /** - * @tc.name: reaky_relu_build_003 + * @tc.name: leaky_relu_build_003 * @tc.desc: Verify that the build function returns a failed message with invalided input. 
* @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_003, TestSize.Level1) +HWTEST_F(LeakyReluBuilderTest, leaky_relu_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; @@ -107,11 +107,11 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_003, TestSize.Level1) } /** - * @tc.name: reaky_relu_build_004 + * @tc.name: leaky_relu_build_004 * @tc.desc: Verify that the build function returns a failed message with invalided output. * @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_004, TestSize.Level1) +HWTEST_F(LeakyReluBuilderTest, leaky_relu_build_004, TestSize.Level1) { m_outputs = {1, 2}; m_params = {3}; @@ -125,22 +125,22 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_004, TestSize.Level1) } /** - * @tc.name: reaky_relu_build_005 + * @tc.name: leaky_relu_build_005 * @tc.desc: Verify that the build function returns a failed message with empty allTensor. * @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_005, TestSize.Level1) +HWTEST_F(LeakyReluBuilderTest, leaky_relu_build_005, TestSize.Level1) { OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } /** - * @tc.name: reaky_relu_build_006 + * @tc.name: leaky_relu_build_006 * @tc.desc: Verify that the build function returns a failed message without output tensor. * @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_006, TestSize.Level1) +HWTEST_F(LeakyReluBuilderTest, leaky_relu_build_006, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); @@ -149,11 +149,11 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_006, TestSize.Level1) } /** - * @tc.name: reaky_relu_build_007 + * @tc.name: leaky_relu_build_007 * @tc.desc: Verify that the build function returns a failed message with invalid negative_slope's dataType. 
* @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_007, TestSize.Level1) +HWTEST_F(LeakyReluBuilderTest, leaky_relu_build_007, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -171,11 +171,11 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_007, TestSize.Level1) } /** - * @tc.name: reaky_relu_build_008 + * @tc.name: leaky_relu_build_008 * @tc.desc: Verify that the build function returns a failed message with passing invalid negative_slope param. * @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_008, TestSize.Level1) +HWTEST_F(LeakyReluBuilderTest, leaky_relu_build_008, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -187,11 +187,11 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_008, TestSize.Level1) } /** - * @tc.name: reaky_relu_build_009 + * @tc.name: leaky_relu_build_009 * @tc.desc: Verify that the build function returns a failed message without set buffer for negative_slope. * @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_011, TestSize.Level1) +HWTEST_F(LeakyReluBuilderTest, leaky_relu_build_011, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -205,11 +205,11 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_build_011, TestSize.Level1) } /** - * @tc.name: reaky_relu_getprimitive_001 + * @tc.name: leaky_relu_getprimitive_001 * @tc.desc: Verify that the getPrimitive function returns a successful message. 
* @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_getprimitive_001, TestSize.Level1) +HWTEST_F(LeakyReluBuilderTest, leaky_relu_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); @@ -226,11 +226,11 @@ HWTEST_F(LeakyReluBuilderTest, reaky_relu_getprimitive_001, TestSize.Level1) } /** - * @tc.name: reaky_relu_getprimitive_002 + * @tc.name: leaky_relu_getprimitive_002 * @tc.desc: Verify that the getPrimitive function returns a failed message without build. * @tc.type: FUNC */ -HWTEST_F(LeakyReluBuilderTest, reaky_relu_getprimitive_002, TestSize.Level1) +HWTEST_F(LeakyReluBuilderTest, leaky_relu_getprimitive_002, TestSize.Level1) { LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); -- Gitee