diff --git a/frameworks/native/neural_network_core/validation.cpp b/frameworks/native/neural_network_core/validation.cpp index 110d4ee4b763e92997c9d2345e3881b1ccb94313..d3308d14cf3642c918cd89d8b1f7f60f65665cc2 100644 --- a/frameworks/native/neural_network_core/validation.cpp +++ b/frameworks/native/neural_network_core/validation.cpp @@ -60,7 +60,7 @@ bool ValidateFuseType(OH_NN_FuseType fuseType) bool ValidateTensorType(OH_NN_TensorType nnTensorType) { - if ((nnTensorType >= OH_NN_TENSOR) && (nnTensorType <= OH_NN_POW_SHIFT)) { + if ((nnTensorType >= OH_NN_TENSOR) && (nnTensorType <= OH_NN_TILE_DIMS)) { return true; } return false; diff --git a/frameworks/native/neural_network_runtime/BUILD.gn b/frameworks/native/neural_network_runtime/BUILD.gn index 1b0cd3ed97daf922e5e184c30b190bbf039e5f18..779ed9ac97bac2611cbd23984806730bc6ddbaf1 100644 --- a/frameworks/native/neural_network_runtime/BUILD.gn +++ b/frameworks/native/neural_network_runtime/BUILD.gn @@ -62,14 +62,17 @@ ops_sources = [ "ops/bias_add_builder.cpp", "ops/broadcast_to_builder.cpp", "ops/cast_builder.cpp", + "ops/ceil_builder.cpp", "ops/clip_builder.cpp", "ops/concat_builder.cpp", "ops/constant_of_shape_builder.cpp", "ops/conv2d_builder.cpp", "ops/conv2d_transpose_builder.cpp", "ops/cos_builder.cpp", + "ops/crop_builder.cpp", "ops/depth_to_space_builder.cpp", "ops/depthwise_conv2d_native_builder.cpp", + "ops/detection_post_process_builder.cpp", "ops/div_builder.cpp", "ops/eltwise_builder.cpp", "ops/equal_builder.cpp", @@ -78,6 +81,7 @@ ops_sources = [ "ops/expandims_builder.cpp", "ops/fill_builder.cpp", "ops/flatten_builder.cpp", + "ops/floor_builder.cpp", "ops/fullconnection_builder.cpp", "ops/gather_builder.cpp", "ops/gelu_builder.cpp", @@ -85,18 +89,22 @@ ops_sources = [ "ops/greater_equal_builder.cpp", "ops/hswish_builder.cpp", "ops/instance_norm_builder.cpp", + "ops/l2_normalize_builder.cpp", "ops/layernorm_builder.cpp", "ops/leaky_relu_builder.cpp", "ops/less_builder.cpp", 
"ops/lessequal_builder.cpp", "ops/log_builder.cpp", + "ops/log_softmax_builder.cpp", "ops/logical_and_builder.cpp", "ops/logical_not_builder.cpp", "ops/logical_or_builder.cpp", + "ops/lrn_builder.cpp", "ops/lstm_builder.cpp", "ops/matmul_builder.cpp", "ops/maximum_builder.cpp", "ops/maxpool_builder.cpp", + "ops/minimum_builder.cpp", "ops/mod_builder.cpp", "ops/mul_builder.cpp", "ops/neg_builder.cpp", @@ -109,16 +117,22 @@ ops_sources = [ "ops/prelu_builder.cpp", "ops/quant_dtype_cast_builder.cpp", "ops/range_builder.cpp", + "ops/rank_builder.cpp", "ops/reciprocal_builder.cpp", "ops/reduceall_builder.cpp", + "ops/reducemax_builder.cpp", "ops/reducemean_builder.cpp", + "ops/reducemin_builder.cpp", "ops/reduceprod_builder.cpp", + "ops/reducesum_builder.cpp", "ops/relu6_builder.cpp", "ops/relu_builder.cpp", "ops/reshape_builder.cpp", "ops/resize_bilinear_builder.cpp", + "ops/round_builder.cpp", "ops/rsqrt_builder.cpp", "ops/scale_builder.cpp", + "ops/scatter_nd_builder.cpp", "ops/select_builder.cpp", "ops/shape_builder.cpp", "ops/sigmoid_builder.cpp", @@ -126,6 +140,7 @@ ops_sources = [ "ops/slice_builder.cpp", "ops/softmax_builder.cpp", "ops/space_to_batch_nd_builder.cpp", + "ops/space_to_depth_builder.cpp", "ops/sparse_to_dense_builder.cpp", "ops/split_builder.cpp", "ops/sqrt_builder.cpp", @@ -135,6 +150,7 @@ ops_sources = [ "ops/stack_builder.cpp", "ops/strided_slice_builder.cpp", "ops/sub_builder.cpp", + "ops/swish_builder.cpp", "ops/tanh_builder.cpp", "ops/tile_builder.cpp", "ops/top_k_builder.cpp", diff --git a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp index a4f21b6db616568eebf0ed923c6e3a378f51d7cc..b87a47fd4857021cb685adfc2e833159f4558613 100644 --- a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp +++ b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp @@ -213,6 +213,21 @@ std::vector 
ConvertCast(PrimitivePtr primitive) return ret; } +std::vector ConvertCeil(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertCeil v2_1 failed, primitive is nullptr."); + return {}; + } + + Ceil ceil{}; + OHOS::MessageParcel data; + (void)CeilBlockMarshalling(data, ceil); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertClip(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -335,6 +350,24 @@ std::vector ConvertConstantOfShape(PrimitivePtr primitive) return ret; } +std::vector ConvertCrop(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertCrop v2_1 failed, primitive is nullptr."); + return {}; + } + + Crop crop{}; + crop.axis = mindspore::lite::MindIR_Crop_GetAxis(primitive); + crop.offset = mindspore::lite::MindIR_Crop_GetOffsets(primitive); + + OHOS::MessageParcel data; + (void)CropBlockMarshalling(data, crop); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertDepthToSpace(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -355,6 +388,37 @@ std::vector ConvertDepthToSpace(PrimitivePtr primitive) return ret; } +std::vector ConvertDetectionPostProcess(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertDetectionPostProcess v2_1 failed, primitive is nullptr."); + return {}; + } + + DetectionPostProcess detectionPostProcess{}; + detectionPostProcess.format = static_cast( + mindspore::lite::MindIR_DetectionPostProcess_GetFormat(primitive)); + detectionPostProcess.input_size = mindspore::lite::MindIR_DetectionPostProcess_GetInputSize(primitive); + detectionPostProcess.scale = mindspore::lite::MindIR_DetectionPostProcess_GetScale(primitive); + detectionPostProcess.nmsIoUThreshold = mindspore::lite::MindIR_DetectionPostProcess_GetNmsIouThreshold(primitive); + detectionPostProcess.nmsScoreThreshold = + 
mindspore::lite::MindIR_DetectionPostProcess_GetNmsScoreThreshold(primitive); + detectionPostProcess.maxDetections = mindspore::lite::MindIR_DetectionPostProcess_GetMaxDetections(primitive); + detectionPostProcess.detectionsPerClass = + mindspore::lite::MindIR_DetectionPostProcess_GetDetectionsPerClass(primitive); + detectionPostProcess.maxClassesPerDetection = + mindspore::lite::MindIR_DetectionPostProcess_GetMaxClassesPerDetection(primitive); + detectionPostProcess.numClasses = mindspore::lite::MindIR_DetectionPostProcess_GetNumClasses(primitive); + detectionPostProcess.useRegularNms = mindspore::lite::MindIR_DetectionPostProcess_GetUseRegularNms(primitive); + detectionPostProcess.outQuantized = mindspore::lite::MindIR_DetectionPostProcess_GetOutQuantized(primitive); + + OHOS::MessageParcel data; + (void)DetectionPostProcessBlockMarshalling(data, detectionPostProcess); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertDivFusion(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -454,6 +518,22 @@ std::vector ConvertFlatten(PrimitivePtr primitive) return ret; } +std::vector ConvertFloor(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertFloor v2_1 failed, primitive is nullptr."); + return {}; + } + + Floor floor{}; + + OHOS::MessageParcel data; + (void)FloorBlockMarshalling(data, floor); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertFill(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -682,6 +762,27 @@ std::vector ConvertLogicalOr(PrimitivePtr primitive) return ret; } +std::vector ConvertLrn(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertLrn v2_1 failed, primitive is nullptr."); + return {}; + } + + LRN lRN{}; + lRN.depthRadius = mindspore::lite::MindIR_LRN_GetDepthRadius(primitive); + lRN.bias = 
mindspore::lite::MindIR_LRN_GetBias(primitive); + lRN.alpha = mindspore::lite::MindIR_LRN_GetAlpha(primitive); + lRN.beta = mindspore::lite::MindIR_LRN_GetBeta(primitive); + lRN.normRegion = mindspore::lite::MindIR_LRN_GetNormRegion(primitive); + + OHOS::MessageParcel data; + (void)LRNBlockMarshalling(data, lRN); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertLstm(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -708,6 +809,26 @@ std::vector ConvertLstm(PrimitivePtr primitive) return ret; } +std::vector ConvertL2NormalizeFusion(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertL2NormalizeFusion v2_1 failed, primitive is nullptr."); + return {}; + } + + L2NormalizeFusion l2NormalizeFusion{}; + l2NormalizeFusion.axis = mindspore::lite::MindIR_L2NormalizeFusion_GetAxis(primitive); + l2NormalizeFusion.epslion = mindspore::lite::MindIR_L2NormalizeFusion_GetEpsilon(primitive); + l2NormalizeFusion.activationType = static_cast( + mindspore::lite::MindIR_L2NormalizeFusion_GetActivationType(primitive)); + + OHOS::MessageParcel data; + (void)L2NormalizeFusionBlockMarshalling(data, l2NormalizeFusion); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertMatMulFusion(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -768,6 +889,22 @@ std::vector ConvertMaxPoolFusion(PrimitivePtr primitive) return ret; } +std::vector ConvertMinimum(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertMinimum v2_1 failed, primitive is nullptr."); + return {}; + } + + Minimum minimum{}; + + OHOS::MessageParcel data; + (void)MinimumBlockMarshalling(data, minimum); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertMod(PrimitivePtr primitive) { if 
(primitive == nullptr) { @@ -907,11 +1044,29 @@ std::vector ConvertQuantDTypeCast(PrimitivePtr primitive) return {}; } - QuantDTypeCast quant_d_type_cast{}; + QuantDTypeCastV2 quant_d_type_cast{}; quant_d_type_cast.srcT = mindspore::lite::MindIR_QuantDTypeCast_GetSrcT(primitive); quant_d_type_cast.dstT = mindspore::lite::MindIR_QuantDTypeCast_GetDstT(primitive); + quant_d_type_cast.axis = mindspore::lite::MindIR_QuantDTypeCast_GetAxis(primitive); + + OHOS::MessageParcel data; + (void)QuantDTypeCastV2BlockMarshalling(data, quant_d_type_cast); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + +std::vector ConvertRank(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertRank v2_1 failed, primitive is nullptr."); + return {}; + } + + Rank rank{}; + OHOS::MessageParcel data; - (void)QuantDTypeCastBlockMarshalling(data, quant_d_type_cast); + (void)RankBlockMarshalling(data, rank); std::vector ret(reinterpret_cast(data.GetData()), reinterpret_cast(data.GetData()) + data.GetDataSize()); return ret; @@ -1012,6 +1167,22 @@ std::vector ConvertResize(PrimitivePtr primitive) return ret; } +std::vector ConvertRound(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertRound v2_1 failed, primitive is nullptr."); + return {}; + } + + Round round{}; + + OHOS::MessageParcel data; + (void)RoundBlockMarshalling(data, round); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertRsqrt(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -1045,6 +1216,22 @@ std::vector ConvertScaleFusion(PrimitivePtr primitive) return ret; } +std::vector ConvertScatterNd(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertScatterNd v2_1 failed, primitive is nullptr."); + return {}; + } + + ScatterNd scatterNd{}; + + OHOS::MessageParcel data; + 
(void)ScatterNdBlockMarshalling(data, scatterNd); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertShape(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -1125,6 +1312,24 @@ std::vector ConvertSpaceToBatchND(PrimitivePtr primitive) return ret; } +std::vector ConvertSpaceToDepth(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertSpaceToDepth v2_1 failed, primitive is nullptr."); + return {}; + } + + SpaceToDepth spaceToDepth{}; + spaceToDepth.format = static_cast(mindspore::lite::MindIR_SpaceToDepth_GetFormat(primitive)); + spaceToDepth.blocksize = mindspore::lite::MindIR_SpaceToDepth_GetBlockSize(primitive); + + OHOS::MessageParcel data; + (void)SpaceToDepthBlockMarshalling(data, spaceToDepth); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector ConvertSparseToDense(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -1401,6 +1606,23 @@ std::vector ConvertErf(PrimitivePtr primitive) return ret; } +std::vector ConvertLogSoftmax(PrimitivePtr primitive) +{ + if (primitive == nullptr) { + LOGE("ConvertLogSoftmax v2_1 failed, primitive is nullptr."); + return {}; + } + + LogSoftmax logSoftmax{}; + logSoftmax.axis = mindspore::lite::MindIR_LogSoftmax_GetAxis(primitive); + + OHOS::MessageParcel data; + (void)LogSoftmaxBlockMarshalling(data, logSoftmax); + std::vector ret(reinterpret_cast(data.GetData()), + reinterpret_cast(data.GetData()) + data.GetDataSize()); + return ret; +} + std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr primitive) { switch (type) { @@ -1434,6 +1656,9 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CAST: return ConvertCast(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CEIL: + return ConvertCeil(primitive); + break; case 
OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CLIP: return ConvertClip(primitive); break; @@ -1452,9 +1677,15 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CONSTANT_OF_SHAPE: return ConvertConstantOfShape(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CROP: + return ConvertCrop(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_DEPTH_TO_SPACE: return ConvertDepthToSpace(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_DETECTION_POST_PROCESS: + return ConvertDetectionPostProcess(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_DIV_FUSION: return ConvertDivFusion(primitive); break; @@ -1473,6 +1704,9 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_FLATTEN: return ConvertFlatten(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_FLOOR: + return ConvertFloor(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_FILL: return ConvertFill(primitive); break; @@ -1515,9 +1749,15 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LOGICAL_OR: return ConvertLogicalOr(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LRN: + return ConvertLrn(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LSTM: return ConvertLstm(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_L2_NORMALIZE_FUSION: + return ConvertL2NormalizeFusion(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MATMUL_FUSION: return ConvertMatMulFusion(primitive); break; @@ -1527,6 +1767,9 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MAX_POOL_FUSION: return ConvertMaxPoolFusion(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MINIMUM: + return ConvertMinimum(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MOD: return ConvertMod(primitive); break; @@ -1554,6 +1797,9 
@@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_QUANT_DTYPE_CAST: return ConvertQuantDTypeCast(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RANK: + return ConvertRank(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RANGE: return ConvertRange(primitive); break; @@ -1569,12 +1815,18 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RESIZE: return ConvertResize(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ROUND: + return ConvertRound(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RSQRT: return ConvertRsqrt(primitive); break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SCALE_FUSION: return ConvertScaleFusion(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SCATTER_ND: + return ConvertScatterNd(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SHAPE: return ConvertShape(primitive); break; @@ -1590,6 +1842,9 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SPACE_TO_BATCH_ND: return ConvertSpaceToBatchND(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SPACE_TO_DEPTH: + return ConvertSpaceToDepth(primitive); + break; case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SPARSE_TO_DENSE: return ConvertSparseToDense(primitive); break; @@ -1641,6 +1896,9 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ERF: return ConvertErf(primitive); break; + case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LOG_SOFTMAX: + return ConvertLogSoftmax(primitive); + break; default: return {}; } diff --git a/frameworks/native/neural_network_runtime/ops/abs_builder.cpp b/frameworks/native/neural_network_runtime/ops/abs_builder.cpp index 9d0b11a43f4b901d81fd2e5233e9a2e6758e9d17..32339aa59bc5f5f3c109ba723ef4bf549f32d0e5 100755 --- a/frameworks/native/neural_network_runtime/ops/abs_builder.cpp +++ 
b/frameworks/native/neural_network_runtime/ops/abs_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Abs"; AbsBuilder::AbsBuilder() {} @@ -41,15 +42,16 @@ OH_NN_ReturnCode AbsBuilder::Build(const std::vector& paramsIndex, LOGE("[Abs] Build failed, passed invalid input or output index."); return ret; } - - if (!paramsIndex.empty()) { - LOGW("[Abs] Build failed, the abs expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Abs] Build failed, passed invalid param index."); + return ret; + } + m_name = OP_NAME; m_isBuild = true; return OH_NN_SUCCESS; diff --git a/frameworks/native/neural_network_runtime/ops/add_builder.cpp b/frameworks/native/neural_network_runtime/ops/add_builder.cpp index b4ede0c70051951e6e916a70c0c2537204c84360..431733af094146199a27a7cf1b0e570dd760533e 100644 --- a/frameworks/native/neural_network_runtime/ops/add_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/add_builder.cpp @@ -23,6 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const std::string OP_NAME = "Add"; AddBuilder::AddBuilder() {} @@ -72,6 +73,12 @@ OH_NN_ReturnCode AddBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Add] Build failed, the param index of Add operation is invalid."); + return ret; + } + for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { diff --git 
a/frameworks/native/neural_network_runtime/ops/add_builder.h b/frameworks/native/neural_network_runtime/ops/add_builder.h index 58594b7737db0d68206fd839c19803c9ec7a71eb..15ec1b7e797acdf35ce2ae47ac6e4b1650cffa89 100644 --- a/frameworks/native/neural_network_runtime/ops/add_builder.h +++ b/frameworks/native/neural_network_runtime/ops/add_builder.h @@ -39,7 +39,7 @@ private: OH_NN_ReturnCode SetActivation(std::shared_ptr& tensor); private: - mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/all_builder.cpp b/frameworks/native/neural_network_runtime/ops/all_builder.cpp index aa53e2f1dd6099e2fd81e45847eef03076448112..63ac02b7c4aba7ebdbcd66233cd19ff4895090a8 100644 --- a/frameworks/native/neural_network_runtime/ops/all_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/all_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const int SCALAR_LENGTH = 1; static const std::string OP_NAME = "All"; @@ -64,10 +65,16 @@ OH_NN_ReturnCode AllBuilder::Build(const std::vector& paramsIndex, LOGE("[All] Build failed, passed invalid input or output index."); return ret; } - + m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[All] Build failed, passed invalid param index."); + return ret; + } + OH_NN_ReturnCode returnCode; for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; @@ -107,4 +114,4 @@ LiteGraphPrimitvePtr AllBuilder::GetPrimitive() REGISTER_OPS(AllBuilder, OH_NN_OPS_ALL); } // namespace Ops } // namespace NeuralNetworkRuntime -} // namespace 
OHOS +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/argmax_builder.cpp b/frameworks/native/neural_network_runtime/ops/argmax_builder.cpp index d40f3a88d9214a9f1aca05acf0bad40d1170fca1..5d11dceec4fd06ed523e7d1f41a4f146b76d9ee6 100644 --- a/frameworks/native/neural_network_runtime/ops/argmax_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/argmax_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 4; static const std::string OP_NAME = "ArgMax"; ArgMaxBuilder::ArgMaxBuilder() {} @@ -31,7 +32,7 @@ OH_NN_ReturnCode ArgMaxBuilder::SetAxis(std::shared_ptr tensor) tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { - LOGE("[ArgMax] SetAxis failed, the axis should be type HNN_INT64."); + LOGE("[ArgMax] SetAxis failed, the axis should be type OH_NN_INT64."); return OH_NN_INVALID_PARAMETER; } @@ -45,12 +46,31 @@ OH_NN_ReturnCode ArgMaxBuilder::SetAxis(std::shared_ptr tensor) return OH_NN_SUCCESS; } +OH_NN_ReturnCode ArgMaxBuilder::SetTopK(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[ArgMax] SetTopK failed, the topK should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ArgMax] SetTopK GetBuffer return nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + m_topK = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + OH_NN_ReturnCode ArgMaxBuilder::SetKeepdims(std::shared_ptr tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_BOOL) { - LOGE("[ArgMax] SetKeepdims failed, the keep_dims should be type HNN_BOOL."); + LOGE("[ArgMax] SetKeepdims failed, the keep_dims should be type OH_NN_BOOL."); return OH_NN_INVALID_PARAMETER; } @@ -64,6 +84,25 @@ OH_NN_ReturnCode 
ArgMaxBuilder::SetKeepdims(std::shared_ptr tensor) return OH_NN_SUCCESS; } +OH_NN_ReturnCode ArgMaxBuilder::SetOutMaxValue(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ArgMax] SetOutMaxValue failed, the outMaxValue should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ArgMax] SetOutMaxValue GetBuffer return nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_outMaxValue = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + /** * Build method. * 1.build primitive of ops. @@ -87,15 +126,27 @@ OH_NN_ReturnCode ArgMaxBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ArgMax] Build failed, passed invalid param index."); + return returnCode; + } + for (int i : paramsIndex) { const std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { case OH_NN_ARG_MAX_AXIS: returnCode = SetAxis(tensor); break; + case OH_NN_ARG_MAX_TOP_K: + returnCode = SetTopK(tensor); + break; case OH_NN_ARG_MAX_KEEPDIMS: returnCode = SetKeepdims(tensor); break; + case OH_NN_ARG_MAX_OUT_MAX_VALUE: + returnCode = SetOutMaxValue(tensor); + break; default: LOGE("[ArgMax] Build failed, param invalid, type = %d.", tensor->GetType()); return OH_NN_INVALID_PARAMETER; diff --git a/frameworks/native/neural_network_runtime/ops/argmax_builder.h b/frameworks/native/neural_network_runtime/ops/argmax_builder.h index aedb9f2caeba49c8062854b3fac27df80d461291..20ce125098b3f54501eb0022ec857cdbcebaeb4f 100644 --- a/frameworks/native/neural_network_runtime/ops/argmax_builder.h +++ b/frameworks/native/neural_network_runtime/ops/argmax_builder.h @@ -37,9 +37,11 @@ public: private: OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); OH_NN_ReturnCode SetKeepdims(std::shared_ptr 
tensor); + OH_NN_ReturnCode SetTopK(std::shared_ptr tensor); + OH_NN_ReturnCode SetOutMaxValue(std::shared_ptr tensor); private: - int64_t m_axis {-1}; + int64_t m_axis {0}; int64_t m_topK {1}; bool m_keepDims {false}; bool m_outMaxValue {false}; diff --git a/frameworks/native/neural_network_runtime/ops/assert_builder.cpp b/frameworks/native/neural_network_runtime/ops/assert_builder.cpp index d11d7fb6ade80f542edd7ebbd25b538b57797a4b..b185e196e8f84dfd7e23d697be772d1f85cc26f4 100644 --- a/frameworks/native/neural_network_runtime/ops/assert_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/assert_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const int SCALAR_LENGTH = 1; static const std::string OP_NAME = "Assert"; @@ -68,6 +69,12 @@ OH_NN_ReturnCode AssertBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Assert] Build failed, passed invalid param index."); + return ret; + } + OH_NN_ReturnCode returnCode; for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; diff --git a/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.cpp b/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.cpp index 39d103b01a8f5ce0d7de343999b52dc80f6192ae..1e6ced63367faf9ddf17ebbe7f7719d44d2effc0 100644 --- a/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 2; static const int CROPS_ROWS = 2; static const int CROPS_COLUMN = 2; static const std::string OP_NAME = 
"BatchToSpaceND"; @@ -100,6 +101,12 @@ OH_NN_ReturnCode BatchToSpaceNDBuilder::Build(const std::vector& param m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[BatchToSpaceND] Build failed, passed invalid param index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { diff --git a/frameworks/native/neural_network_runtime/ops/batchnorm_builder.cpp b/frameworks/native/neural_network_runtime/ops/batchnorm_builder.cpp index 0a013b458f1d6d16222d0e8b08084bf2d3bd707b..ee05ea483bb1067ea31bcf111645beeb4fa1eb64 100644 --- a/frameworks/native/neural_network_runtime/ops/batchnorm_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/batchnorm_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 5; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const int SCALAR_LENGTH = 1; const std::string OP_NAME = "BatchNorm"; @@ -73,6 +74,12 @@ OH_NN_ReturnCode BatchNormBuilder::Build(const std::vector& paramsInde m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[BatchNorm] Build failed, passed invalid param index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { diff --git a/frameworks/native/neural_network_runtime/ops/batchnorm_builder.h b/frameworks/native/neural_network_runtime/ops/batchnorm_builder.h index 3c9916b8fb6590a76ee5919d5cb620cda7186422..ec9ed369bd076433c3bb247ecb26d79bd965e7e1 100644 --- a/frameworks/native/neural_network_runtime/ops/batchnorm_builder.h +++ b/frameworks/native/neural_network_runtime/ops/batchnorm_builder.h @@ -35,7 +35,7 @@ private: OH_NN_ReturnCode 
SetEpsilon(std::shared_ptr tensor); private: - float m_epsilon{1e-7}; + float m_epsilon {0.0001f}; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/bias_add_builder.cpp b/frameworks/native/neural_network_runtime/ops/bias_add_builder.cpp index 4130bd6d5f4bb002acb281c66e472c0c84072cb6..541b2c6c107b1cac8bf712a543fe6e3532b88d4b 100644 --- a/frameworks/native/neural_network_runtime/ops/bias_add_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/bias_add_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "BiasAdd"; BiasAddBuilder::BiasAddBuilder() {} @@ -44,9 +45,11 @@ OH_NN_ReturnCode BiasAddBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; - if (!paramsIndex.empty()) { - LOGE("[BiasAdd] Build failed, expects no parameters"); - return OH_NN_INVALID_PARAMETER; + + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[BiasAdd] Build failed, passed invalid param index."); + return returnCode; } // The quantization type of the first output determinies that of the operator. 
diff --git a/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.cpp b/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.cpp index da5a0c7c0ca6630d9ef99b9f197411928f967c63..132cd8ccd78c3f6adc51ab7e26eaf6c2f6a43696 100755 --- a/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const std::string OP_NAME = "BroadcastTo"; BroadcastToBuilder::BroadcastToBuilder() {} @@ -69,7 +70,13 @@ OH_NN_ReturnCode BroadcastToBuilder::Build(const std::vector& paramsIn m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; - + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[BroadcastTo] Build failed, passed invalid param index."); + return ret; + } + OH_NN_ReturnCode returnCode; for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; diff --git a/frameworks/native/neural_network_runtime/ops/cast_builder.cpp b/frameworks/native/neural_network_runtime/ops/cast_builder.cpp index baeb41f8a21aec035a421d45b9e31e6c08fc05d8..baafb92cc4a6a98423cde5e39c0cc88d4047cac6 100644 --- a/frameworks/native/neural_network_runtime/ops/cast_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/cast_builder.cpp @@ -24,6 +24,7 @@ namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; static const int INPUT_TYPE = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Cast"; CastBuilder::CastBuilder() {} @@ -47,6 +48,12 @@ OH_NN_ReturnCode CastBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Cast] Build failed, the param index of Cast 
operation is invalid."); + return ret; + } + auto castType = allTensors[inputsIndex[INPUT_TYPE]]->GetBuffer(); if (castType == nullptr) { LOGE("[Cast] Build castType GetBuffer return nullptr."); @@ -58,11 +65,6 @@ OH_NN_ReturnCode CastBuilder::Build(const std::vector& paramsIndex, return OH_NN_INVALID_PARAMETER; } - if (!paramsIndex.empty()) { - LOGE("[Cast] Cast expects no parameters"); - return OH_NN_INVALID_PARAMETER; - } - // The quantization type of the first output determinies that of the operator. SetQuantType(outputsIndex, allTensors); m_isBuild = true; diff --git a/frameworks/native/neural_network_runtime/ops/ceil_builder.cpp b/frameworks/native/neural_network_runtime/ops/ceil_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..51e7d84211332075344f175cc3aee9a4e1439938 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/ceil_builder.cpp @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ceil_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; +static const std::string OP_NAME = "Ceil"; + +CeilBuilder::CeilBuilder() {} + +CeilBuilder::~CeilBuilder() {} + +OH_NN_ReturnCode CeilBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Ceil] Build failed, the ceil operation has been build. cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Ceil] Build failed, passed invalid input or output index."); + return ret; + } + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Ceil] Build failed, passed invalid param index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr CeilBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Ceil] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Ceil_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(CeilBuilder, OH_NN_OPS_CEIL); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/ceil_builder.h b/frameworks/native/neural_network_runtime/ops/ceil_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..15df14191a6339e2f7f11e4b7e859f7f6c2b7062 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/ceil_builder.h @@ -0,0 
+1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_CEIL_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_CEIL_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class CeilBuilder : public OpsBuilder { +public: + CeilBuilder(); + ~CeilBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_CEIL_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/clip_builder.cpp b/frameworks/native/neural_network_runtime/ops/clip_builder.cpp index 5f46555e0a91999ba4768eb81e4a66560e6aa405..2f5257d09602de57c0c5bc2fd6b5735588396267 100644 --- a/frameworks/native/neural_network_runtime/ops/clip_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/clip_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 2; static constexpr int SCALAR_LENGTH = 1; static const std::string OP_NAME = "Clip"; @@ -89,7 +90,13 @@ 
OH_NN_ReturnCode ClipBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; - + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Clip] Build failed, passed invalid param index."); + return ret; + } + OH_NN_ReturnCode returnCode; for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; diff --git a/frameworks/native/neural_network_runtime/ops/concat_builder.cpp b/frameworks/native/neural_network_runtime/ops/concat_builder.cpp index a66e07103b335ae856c768655f277e85428407d7..7e08da0446ba8c52dd2392699a266612003534a3 100644 --- a/frameworks/native/neural_network_runtime/ops/concat_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/concat_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static constexpr int MINIMUM_INTPUT = 2; static constexpr int OUTPUT_NUM = 1; +static constexpr int PARAM_MAX_NUM = 1; static constexpr int AXIS_LENGTH = 1; static const std::string OP_NAME = "Concat"; @@ -76,6 +77,12 @@ OH_NN_ReturnCode ConcatBuilder::Build(const std::vector& paramsIndex, return returnCode; } + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Concat] Build failed, passed invalid param index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { diff --git a/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.cpp b/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.cpp index d5a9aee25634c3b8550a362d76732e575bb9b70f..d80bc0950af2932227c1c5350166352f0616e77c 100755 --- a/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int 
OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 2; static const int SCALAR_LENGTH = 1; static const std::string OP_NAME = "ConstantOfShape"; @@ -92,26 +93,31 @@ OH_NN_ReturnCode ConstantOfShapeBuilder::Build(const std::vector& para m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; - - OH_NN_ReturnCode returnCode; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[ConstantOfShape] Build failed, passed invalid invalid index."); + return ret; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); switch (tensor->GetType()) { case OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE: - returnCode = SetDataType(tensor); + ret = SetDataType(tensor); break; case OH_NN_CONSTANT_OF_SHAPE_VALUE: - returnCode = SetValue(tensor); + ret = SetValue(tensor); break; default: LOGE("[ConstantOfShape] Build failed, param invalid, type=%d", tensor->GetType()); return OH_NN_INVALID_PARAMETER; } - if (returnCode != OH_NN_SUCCESS) { + if (ret != OH_NN_SUCCESS) { LOGE("[ConstantOfShape] Build failed, passed invalid param."); - return returnCode; + return ret; } } diff --git a/frameworks/native/neural_network_runtime/ops/conv2d_builder.cpp b/frameworks/native/neural_network_runtime/ops/conv2d_builder.cpp index 04f65bbb4a04babde6a34a2f3e0c0c675b90caf4..cb618e189677c2b1a0d108d72a7e1048b16ae5c8 100644 --- a/frameworks/native/neural_network_runtime/ops/conv2d_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/conv2d_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static constexpr int INPUT_NUM = 3; static constexpr int OUTPUT_NUM = 1; +static constexpr int PARAM_MAX_NUM = 9; static constexpr int CONV2D_INPUT_WEIGHT = 1; static constexpr int WEIGHT_SIZE = 4; static constexpr int OUT_CHANNEL_INDEX = 0; @@ -236,6 +237,12 @@ OH_NN_ReturnCode Conv2DBuilder::Build(const std::vector& paramsIndex, return returnCode; } + returnCode = CheckParamIndex(paramsIndex, 
allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Conv2d] Build failed, passed invalid invalid index."); + return returnCode; + } + returnCode = SetChannel(inputsIndex, allTensors); if (returnCode != OH_NN_SUCCESS) { return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/conv2d_builder.h b/frameworks/native/neural_network_runtime/ops/conv2d_builder.h index 4c942e8ca540f59ed0fa4ddb8e4055efd64d99dc..5b89b47adbd71896c9f49161507b9663dd33d64b 100644 --- a/frameworks/native/neural_network_runtime/ops/conv2d_builder.h +++ b/frameworks/native/neural_network_runtime/ops/conv2d_builder.h @@ -49,15 +49,15 @@ private: OH_NN_ReturnCode SetActavitation(std::shared_ptr tensor); private: - int64_t m_group{1}; - int64_t m_inChannel{0}; - int64_t m_outChannel{0}; + int64_t m_group {1}; + int64_t m_inChannel {0}; + int64_t m_outChannel {0}; std::vector m_kernelSize; std::vector m_strides; std::vector m_pad; std::vector m_dilation; - mindspore::lite::PadMode m_padMode{mindspore::lite::PAD_MODE_PAD}; - mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + mindspore::lite::PadMode m_padMode {mindspore::lite::PAD_MODE_PAD}; + mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.cpp b/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.cpp index ae17160f86b6aefd13b022a3682e6d4f6d7b6234..fe54a4b907a9fa504421facc18f27053518627b4 100644 --- a/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static constexpr int INPUT_NUM = 3; static constexpr int OUTPUT_NUM = 1; +static constexpr int PARAM_MAX_NUM = 10; static constexpr int 
INPUT_WEIGHT = 1; static constexpr int WEIGHT_SIZE = 4; static constexpr int OUT_CHANNEL_INDEX = 0; @@ -251,6 +252,12 @@ OH_NN_ReturnCode Conv2DTransposeBuilder::Build(const std::vector& para return returnCode; } + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Conv2DTranspose] Build failed, passed invalid param index."); + return returnCode; + } + SetKernelSize(inputsIndex, allTensors); for (int i : paramsIndex) { diff --git a/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.h b/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.h index 2094b29201049ab64cc92b7fa17f65a6b528eadb..a84c280d82a7cf390f2e39d28ce5d949def9f33b 100644 --- a/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.h +++ b/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.h @@ -48,9 +48,9 @@ private: OH_NN_ReturnCode SetActivation(std::shared_ptr tensor); private: - int64_t m_group{1}; - int64_t m_inChannel{0}; - int64_t m_outChannel{0}; + int64_t m_group {1}; + int64_t m_inChannel {0}; + int64_t m_outChannel {0}; std::vector m_kernelSize; std::vector m_strides; std::vector m_padList; diff --git a/frameworks/native/neural_network_runtime/ops/cos_builder.cpp b/frameworks/native/neural_network_runtime/ops/cos_builder.cpp index 86b0ca9d43ebaf03a4c91b48b65d766af5a09170..df3b1e9be57e8ed04d851899cb812ec03119f18b 100644 --- a/frameworks/native/neural_network_runtime/ops/cos_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/cos_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Cos"; CosBuilder::CosBuilder() {} @@ -41,10 +42,11 @@ OH_NN_ReturnCode CosBuilder::Build(const std::vector& paramsIndex, LOGE("[Cos] Build failed, passed invalid input or output index."); return ret; } - - if 
(!paramsIndex.empty()) { - LOGW("[Cos] Build failed, the cos expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Cos] Build failed, passed invalid param index."); + return ret; } m_inputsIndex = inputsIndex; diff --git a/frameworks/native/neural_network_runtime/ops/crop_builder.cpp b/frameworks/native/neural_network_runtime/ops/crop_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5ca12e7d3d9b4f476da72f3d2c66a63278ac2132 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/crop_builder.cpp @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "crop_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 2; +static const int SCALAR_LENGTH = 1; +static const std::string OP_NAME = "Crop"; + +CropBuilder::CropBuilder() {} + +CropBuilder::~CropBuilder() {} + +OH_NN_ReturnCode CropBuilder::SetAxis(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Crop] The axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[Crop] The axis should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Crop] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_axis = *static_cast(buffer); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode CropBuilder::SetOffset(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Crop] The offset should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + m_offset.clear(); + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Crop] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + int64_t* pOffset = static_cast(buffer); + + uint32_t elementCount = tensor->GetElementCount(); + for (uint32_t i = 0; i < elementCount; ++i) { + m_offset.emplace_back(*pOffset); + ++pOffset; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode CropBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Crop] Build failed, the Crop operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Crop] Build failed, passed invalid input or output index."); + return ret; + } + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Crop] Build failed, passed invalid param index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + OH_NN_ReturnCode returnCode; + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_CROP_AXIS: + returnCode = SetAxis(tensor); + break; + case OH_NN_CROP_OFFSET: + returnCode = SetOffset(tensor); + break; + default: + LOGE("[Crop] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Crop] Build failed, passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr CropBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Crop] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Crop_CreatePrimitive(m_axis, m_offset); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(CropBuilder, OH_NN_OPS_CROP); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/crop_builder.h b/frameworks/native/neural_network_runtime/ops/crop_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..cc4ac758a7f9da6ddd5c064bf6aa56ca73a1dc68 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/crop_builder.h @@ -0,0 +1,50 @@ 
+/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_CROP_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_CROP_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class CropBuilder : public OpsBuilder { +public: + CropBuilder(); + ~CropBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetOffset(std::shared_ptr tensor); + +private: + int64_t m_axis {0}; + std::vector m_offset; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_CROP_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.cpp b/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.cpp index a003f29a03613feb777bdc5d7910d0d6dece905b..561caefad705ba8163788e2975cf9083507b57ac 100755 --- a/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.cpp @@ -12,7 +12,7 @@ * See the License for the specific language governing 
permissions and * limitations under the License. */ -#include + #include "depth_to_space_builder.h" #include "transform.h" @@ -23,8 +23,10 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 2; static const int SCALAR_LENGTH = 1; static const std::string OP_NAME = "DepthToSpace"; +static const std::unordered_map modeList = {{0, "DCR"}, {1, "CRD"}}; DepthToSpaceBuilder::DepthToSpaceBuilder() {} @@ -52,15 +54,10 @@ OH_NN_ReturnCode DepthToSpaceBuilder::SetBlockSize(std::shared_ptr ten return OH_NN_SUCCESS; } -OH_NN_ReturnCode DepthToSpaceBuilder::SetFormat(std::shared_ptr tensor) +OH_NN_ReturnCode DepthToSpaceBuilder::SetMode(std::shared_ptr tensor) { - if (tensor->GetDataType() != OH_NN_INT8) { - LOGE("[DepthToSpace] The format should be type OH_NN_INT8."); - return OH_NN_INVALID_PARAMETER; - } - - if (tensor->GetElementCount() != SCALAR_LENGTH) { - LOGE("[DepthToSpace] The format should be scalar."); + if (tensor->GetDataType() != OH_NN_INT32) { + LOGE("[DepthToSpace] The mode should be type OH_NN_INT32."); return OH_NN_INVALID_PARAMETER; } @@ -70,32 +67,15 @@ OH_NN_ReturnCode DepthToSpaceBuilder::SetFormat(std::shared_ptr tensor return OH_NN_INVALID_PARAMETER; } - int8_t* formatData = static_cast(buffer); - - if (!OHOS::NeuralNetworkRuntime::Validation::ValidateTensorFormat(static_cast(*formatData))) { - LOGE("[DepthToSpace] SetFormat failed. 
Format type is invalid."); - return OH_NN_INVALID_PARAMETER; - } - - auto pFormat = (OH_NN_Format)(*formatData); - m_format = NNToMS::TransformFormat(pFormat); - - return OH_NN_SUCCESS; -} - -OH_NN_ReturnCode DepthToSpaceBuilder::SetMode(std::shared_ptr tensor) -{ - if (tensor->GetDataType() != OH_NN_INT8) { - LOGE("[DepthToSpace] The mode should be type OH_NN_INT8."); - return OH_NN_INVALID_PARAMETER; - } - - void* buffer = tensor->GetBuffer(); - if (buffer == nullptr) { - LOGE("[DepthToSpace] Tensor buffer is nullptr."); + int modeKey = *(static_cast(buffer)); + auto it = modeList.find(modeKey); + if (it != modeList.end()) { + m_mode = it->second; + } else { + LOGE("[DepthToSpace] The mode value should between [0, 1], but get %d.", modeKey); + LOGE("[DepthToSpace] mode value: 0-DCR, 1-CRD"); return OH_NN_INVALID_PARAMETER; } - m_mode = static_cast(buffer); return OH_NN_SUCCESS; } @@ -118,29 +98,31 @@ OH_NN_ReturnCode DepthToSpaceBuilder::Build(const std::vector& paramsI m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; - - OH_NN_ReturnCode returnCode; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[DepthToSpace] Build failed, passed invalid param index."); + return ret; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); switch (tensor->GetType()) { case OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE: - returnCode = SetBlockSize(tensor); - break; - case OH_NN_DEPTH_TO_SPACE_FORMAT: - returnCode = SetFormat(tensor); + ret = SetBlockSize(tensor); break; case OH_NN_DEPTH_TO_SPACE_MODE: - returnCode = SetMode(tensor); + ret = SetMode(tensor); break; default: LOGE("[DepthToSpace] Build failed, param invalid, type=%d", tensor->GetType()); return OH_NN_INVALID_PARAMETER; } - if (returnCode != OH_NN_SUCCESS) { + if (ret != OH_NN_SUCCESS) { LOGE("[DepthToSpace] Build failed, passed invalid param."); - return returnCode; + return ret; } } @@ -156,7 +138,9 @@ 
LiteGraphPrimitvePtr DepthToSpaceBuilder::GetPrimitive() return {nullptr, DestroyLiteGraphPrimitive}; } - void* primitive = mindspore::lite::MindIR_DepthToSpace_CreatePrimitive(m_blockSize, m_format, m_mode); + mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW}; + + void* primitive = mindspore::lite::MindIR_DepthToSpace_CreatePrimitive(m_blockSize, format, m_mode); LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; return graphPrimitivePtr; } diff --git a/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.h b/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.h index aa5d1914eda7584eda5075cd19015d4e59aa954c..5ecc071488323f1e34aed4c627a570e98b087d74 100755 --- a/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.h +++ b/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.h @@ -37,12 +37,10 @@ public: private: OH_NN_ReturnCode SetBlockSize(std::shared_ptr tensor); - OH_NN_ReturnCode SetFormat(std::shared_ptr tensor); OH_NN_ReturnCode SetMode(std::shared_ptr tensor); private: int64_t m_blockSize {0}; - mindspore::lite::Format m_format {mindspore::lite::FORMAT_NCHW}; std::string m_mode; }; } // namespace Ops diff --git a/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.cpp b/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.cpp index 9c24d5c921bf73babe0f30a8f6b73d238c901dcd..255c83da038b7db0a4e60b390e7d92d453895035 100644 --- a/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 3; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 8; static const int PAD_MODE_SIZE = 1; static const int PAD_LIST_SIZE = 4; static const int IN_CHANNEL_IN_INPUT = 3; @@ -211,6 +212,12 @@ OH_NN_ReturnCode 
DepthwiseConv2DNativeBuilder::Build(const std::vector return ret; } + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[DepthwiseConv2DNative] Build failed, passed invalid param index."); + return ret; + } + auto inputShape = allTensors[inputsIndex[INPUT_X]]->GetDimensions(); if (inputShape.size() != INPUT_RANK) { LOGE("[DepthwiseConv2DNative] Build failed, invalid rank of shape of input, should be 4 dimensions."); diff --git a/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.cpp b/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e08cb376ce4d5b9a0b96e1385923ea102122eb0c --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.cpp @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "detection_post_process_builder.h" + +#include "transform.h" +#include "validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 3; +static const int OUTPUT_NUM = 4; +static const int PARAM_MAX_NUM = 10; +static const int SCALAR_LENGTH = 1; +static const std::string OP_NAME = "DetectionPostProcess"; + +DetectionPostProcessBuilder::DetectionPostProcessBuilder() {} + +DetectionPostProcessBuilder::~DetectionPostProcessBuilder() {} + +OH_NN_ReturnCode DetectionPostProcessBuilder::SetInputSize(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[DetectionPostProcess] The inputSize should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[DetectionPostProcess] The inputSize should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DetectionPostProcess] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_inputSize = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DetectionPostProcessBuilder::SetScale(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[DetectionPostProcess] The scale should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + m_scale.clear(); + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DetectionPostProcess] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + float* pScale = static_cast(buffer); + + uint32_t elementCount = tensor->GetElementCount(); + for (uint32_t i = 0; i < elementCount; ++i) { + m_scale.emplace_back(*pScale); + ++pScale; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DetectionPostProcessBuilder::SetNmsIoUThreshold(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[DetectionPostProcess] The nmsIoUThreshold 
should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[DetectionPostProcess] The nmsIoUThreshold should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DetectionPostProcess] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_nmsIoUThreshold = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DetectionPostProcessBuilder::SetNmsScoreThreshold(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[DetectionPostProcess] The scoreThreshold should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[DetectionPostProcess] The scoreThreshold should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DetectionPostProcess] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_nmsScoreThreshold = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DetectionPostProcessBuilder::SetMaxDetections(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[DetectionPostProcess] The maxDetections should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[DetectionPostProcess] The maxDetections should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DetectionPostProcess] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_maxDetections = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DetectionPostProcessBuilder::SetDetectionsPerClass(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[DetectionPostProcess] The 
detectionsPerClass should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[DetectionPostProcess] The detectionsPerClass should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DetectionPostProcess] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_detectionsPerClass = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DetectionPostProcessBuilder::SetMaxClassesPerDetection(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[DetectionPostProcess] The maxClassesPerDetection should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[DetectionPostProcess] The maxClassesPerDetection should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DetectionPostProcess] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_maxClassesPerDetection = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DetectionPostProcessBuilder::SetNumClasses(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[DetectionPostProcess] The numClasses should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[DetectionPostProcess] The numClasses should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DetectionPostProcess] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_numClasses = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DetectionPostProcessBuilder::SetUseRegularNms(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_BOOL) { + 
LOGE("[DetectionPostProcess] The useRegularNms should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[DetectionPostProcess] The useRegularNms should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DetectionPostProcess] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_useRegularNms = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DetectionPostProcessBuilder::SetOutQuantized(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[DetectionPostProcess] The outQuantized should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[DetectionPostProcess] The outQuantized should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DetectionPostProcess] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_outQuantized = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode DetectionPostProcessBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[DetectionPostProcess] Build failed, the detectionPostProcess operation has been build. 
\ + cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[DetectionPostProcess] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[DetectionPostProcess] Build failed, passed invalid param index."); + return ret; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE: + ret = SetInputSize(tensor); + break; + case OH_NN_DETECTION_POST_PROCESS_SCALE: + ret = SetScale(tensor); + break; + case OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD: + ret = SetNmsIoUThreshold(tensor); + break; + case OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD: + ret = SetNmsScoreThreshold(tensor); + break; + case OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS: + ret = SetMaxDetections(tensor); + break; + case OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS: + ret = SetDetectionsPerClass(tensor); + break; + case OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION: + ret = SetMaxClassesPerDetection(tensor); + break; + case OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES: + ret = SetNumClasses(tensor); + break; + case OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS: + ret = SetUseRegularNms(tensor); + break; + case OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED: + ret = SetOutQuantized(tensor); + break; + default: + LOGE("[DetectionPostProcess] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (ret != OH_NN_SUCCESS) { + LOGE("[DetectionPostProcess] Build failed, passed invalid param."); + return ret; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + 
+LiteGraphPrimitvePtr DetectionPostProcessBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[DetectionPostProcess] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW}; + + void* primitive = mindspore::lite::MindIR_DetectionPostProcess_CreatePrimitive(format, m_inputSize, m_scale, + m_nmsIoUThreshold, m_nmsScoreThreshold, m_maxDetections, m_detectionsPerClass, m_maxClassesPerDetection, + m_numClasses, m_useRegularNms, m_outQuantized); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(DetectionPostProcessBuilder, OH_NN_OPS_DETECTION_POST_PROCESS); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.h b/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..bd2b8dcc56cfb734c67a5a6abedfc06480de4a80 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_DETECTION_POST_PROCESS_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_DETECTION_POST_PROCESS_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class DetectionPostProcessBuilder : public OpsBuilder { +public: + DetectionPostProcessBuilder(); + ~DetectionPostProcessBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetInputSize(std::shared_ptr tensor); + OH_NN_ReturnCode SetScale(std::shared_ptr tensor); + OH_NN_ReturnCode SetNmsIoUThreshold(std::shared_ptr tensor); + OH_NN_ReturnCode SetNmsScoreThreshold(std::shared_ptr tensor); + OH_NN_ReturnCode SetMaxDetections(std::shared_ptr tensor); + OH_NN_ReturnCode SetDetectionsPerClass(std::shared_ptr tensor); + OH_NN_ReturnCode SetMaxClassesPerDetection(std::shared_ptr tensor); + OH_NN_ReturnCode SetNumClasses(std::shared_ptr tensor); + OH_NN_ReturnCode SetUseRegularNms(std::shared_ptr tensor); + OH_NN_ReturnCode SetOutQuantized(std::shared_ptr tensor); + +private: + int64_t m_inputSize {0}; + std::vector m_scale; + float m_nmsIoUThreshold {0.0f}; + float m_nmsScoreThreshold {0.0f}; + int64_t m_maxDetections {0}; + int64_t m_detectionsPerClass {0}; + int64_t m_maxClassesPerDetection {0}; + int64_t m_numClasses {0}; + bool m_useRegularNms {false}; + bool m_outQuantized {false}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_DETECTION_POST_PROCESS_BUILDER_H diff --git a/frameworks/native/neural_network_runtime/ops/div_builder.cpp b/frameworks/native/neural_network_runtime/ops/div_builder.cpp index 6b3e62fb2cd6da057cc673e16a3e4aa9fbf5115b..89227b01a0ff4b0d08756afbfc40e1197f7b9a28 100644 --- 
a/frameworks/native/neural_network_runtime/ops/div_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/div_builder.cpp @@ -23,6 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static constexpr int SCALAR_LENGTH = 1; static const std::string OP_NAME = "Div"; @@ -78,6 +79,12 @@ OH_NN_ReturnCode DivBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Div] Build failed, passed invalid param index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { diff --git a/frameworks/native/neural_network_runtime/ops/div_builder.h b/frameworks/native/neural_network_runtime/ops/div_builder.h index ac8544620f14240a77ab3661335f46c95be756a9..1c7daaf46e6217b3c3d09e3566471d2b0806d0ec 100644 --- a/frameworks/native/neural_network_runtime/ops/div_builder.h +++ b/frameworks/native/neural_network_runtime/ops/div_builder.h @@ -39,7 +39,7 @@ private: OH_NN_ReturnCode SetActicationType(std::shared_ptr tensor); private: - mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/eltwise_builder.cpp b/frameworks/native/neural_network_runtime/ops/eltwise_builder.cpp index df6b649518ec80f1be3b145ba32dd0799335a35c..8a77b3db211a00b4466a3cbeb2da35baf0daaefe 100644 --- a/frameworks/native/neural_network_runtime/ops/eltwise_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/eltwise_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int 
INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static constexpr int SCALAR_LENGTH = 1; static const std::string OP_NAME = "Eltwise"; @@ -77,6 +78,12 @@ OH_NN_ReturnCode EltwiseBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Eltwise] Build failed, passed invalid input param indices."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { diff --git a/frameworks/native/neural_network_runtime/ops/eltwise_builder.h b/frameworks/native/neural_network_runtime/ops/eltwise_builder.h index 239a194e34ada8ac388b1cf774acabd9680776cf..78d4dc55e0a09af0200a7a56ca887fc7f8e9c4e3 100644 --- a/frameworks/native/neural_network_runtime/ops/eltwise_builder.h +++ b/frameworks/native/neural_network_runtime/ops/eltwise_builder.h @@ -39,7 +39,7 @@ private: OH_NN_ReturnCode SetMode(std::shared_ptr tensor); private: - mindspore::lite::EltwiseMode m_mode{mindspore::lite::ELTWISE_MODE_PROD}; + mindspore::lite::EltwiseMode m_mode {mindspore::lite::ELTWISE_MODE_PROD}; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/equal_builder.cpp b/frameworks/native/neural_network_runtime/ops/equal_builder.cpp index cb3a9b99ee3756a6fdc392e764b049190c769b9c..eacbd676d279991b9f6144f9e9ba2fabdc58d995 100644 --- a/frameworks/native/neural_network_runtime/ops/equal_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/equal_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Equal"; EqualBuilder::EqualBuilder() {} @@ -42,9 +43,10 @@ OH_NN_ReturnCode EqualBuilder::Build(const std::vector& paramsIndex, 
return ret; } - if (!paramsIndex.empty()) { - LOGW("[Equal] Build failed, the equal expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Equal] Build failed, passed invalid param index."); + return ret; } m_inputsIndex = inputsIndex; diff --git a/frameworks/native/neural_network_runtime/ops/erf_builder.cpp b/frameworks/native/neural_network_runtime/ops/erf_builder.cpp index 19424e78f9ee5340bf43a1fd7d88e591abac3b09..95a36c3499f08a15a05f91729b5eabf2d9cc9ddd 100755 --- a/frameworks/native/neural_network_runtime/ops/erf_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/erf_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Erf"; ErfBuilder::ErfBuilder() {} @@ -41,7 +42,13 @@ OH_NN_ReturnCode ErfBuilder::Build(const std::vector& paramsIndex, LOGE("[Erf] Build failed, passed invalid input or output index."); return ret; } - + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Erf] Build failed, passed invalid param index."); + return ret; + } + if (!paramsIndex.empty()) { LOGW("[Erf] Build failed, the erf expects no parameters, but receive %zu", paramsIndex.size()); return OH_NN_INVALID_PARAMETER; diff --git a/frameworks/native/neural_network_runtime/ops/exp_builder.cpp b/frameworks/native/neural_network_runtime/ops/exp_builder.cpp index 93e48c002e419b615b5d3a1dfdd47b8d583549a6..4afb105f5bb7e9f621ea1453525e7d6c45e62546 100755 --- a/frameworks/native/neural_network_runtime/ops/exp_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/exp_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int 
PARAM_MAX_NUM = 3; static const int SCALAR_LENGTH = 1; static const std::string OP_NAME = "Exp"; @@ -111,29 +112,34 @@ OH_NN_ReturnCode ExpBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; - - OH_NN_ReturnCode returnCode; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Exp] Build failed, passed invalid param index."); + return ret; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); switch (tensor->GetType()) { case OH_NN_EXP_BASE: - returnCode = SetBase(tensor); + ret = SetBase(tensor); break; case OH_NN_EXP_SCALE: - returnCode = SetScale(tensor); + ret = SetScale(tensor); break; case OH_NN_EXP_SHIFT: - returnCode = SetShift(tensor); + ret = SetShift(tensor); break; default: LOGE("[Exp] Build failed, param invalid, type=%d", tensor->GetType()); return OH_NN_INVALID_PARAMETER; } - if (returnCode != OH_NN_SUCCESS) { + if (ret != OH_NN_SUCCESS) { LOGE("[Exp] Build failed, passed invalid param."); - return returnCode; + return ret; } } diff --git a/frameworks/native/neural_network_runtime/ops/exp_builder.h b/frameworks/native/neural_network_runtime/ops/exp_builder.h index 8c4067efb42fb70d4c0cfdd884b005f4e82a29e7..edacbfc14375c0c9732886c7eef544196bb0e5db 100755 --- a/frameworks/native/neural_network_runtime/ops/exp_builder.h +++ b/frameworks/native/neural_network_runtime/ops/exp_builder.h @@ -42,7 +42,7 @@ private: private: float m_base {-1.0f}; - float m_scale {0.1f}; + float m_scale {1.0f}; float m_shift {0.0f}; }; } // namespace Ops diff --git a/frameworks/native/neural_network_runtime/ops/expandims_builder.cpp b/frameworks/native/neural_network_runtime/ops/expandims_builder.cpp index 095db7bd461c79cc35f0c2c309e1ee3cc6406c80..ded2bd5050b1dc7f18d4144c984aaeee91eaf939 100644 --- a/frameworks/native/neural_network_runtime/ops/expandims_builder.cpp +++ 
b/frameworks/native/neural_network_runtime/ops/expandims_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "ExpandDims"; ExpandDimsBuilder::ExpandDimsBuilder() {} @@ -45,9 +46,10 @@ OH_NN_ReturnCode ExpandDimsBuilder::Build(const std::vector& paramsInd m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; - if (!paramsIndex.empty()) { - LOGE("[ExpandDims] Build failed, expandDims expects no parameters"); - return OH_NN_INVALID_PARAMETER; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[ExpandDims] Build failed, the param index of ExpandDims operation is invalid."); + return ret; } m_isBuild = true; diff --git a/frameworks/native/neural_network_runtime/ops/fill_builder.cpp b/frameworks/native/neural_network_runtime/ops/fill_builder.cpp index 94c3f37452ac3c1cce0c68df2969c39e246e7645..f09f0b14a351abb6d033d777ae6f39ec9a55d2d4 100644 --- a/frameworks/native/neural_network_runtime/ops/fill_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/fill_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Fill"; FillBuilder::FillBuilder() {} @@ -46,9 +47,10 @@ OH_NN_ReturnCode FillBuilder::Build(const std::vector& paramsIndex, return returnCode; } - if (!paramsIndex.empty()) { - LOGW("[Fill] Build failed, fill expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Fill] Fill Build failed, Passed invalid param indices."); + return returnCode; } m_inputsIndex = inputsIndex; diff --git 
a/frameworks/native/neural_network_runtime/ops/flatten_builder.cpp b/frameworks/native/neural_network_runtime/ops/flatten_builder.cpp index acaf90dfbdbb34599ae124d13e090582f57526ca..f9b16c108e7660dd2d8c79e6f0ce5f529105a070 100755 --- a/frameworks/native/neural_network_runtime/ops/flatten_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/flatten_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const int SCALAR_LENGTH = 1; static const std::string OP_NAME = "Flatten"; @@ -67,23 +68,28 @@ OH_NN_ReturnCode FlattenBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; - - OH_NN_ReturnCode returnCode; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Flatten] Build failed, passed invalid param index."); + return ret; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); switch (tensor->GetType()) { case OH_NN_FLATTEN_AXIS: - returnCode = SetAxis(tensor); + ret = SetAxis(tensor); break; default: LOGE("[Flatten] Build failed, param invalid, type=%d", tensor->GetType()); return OH_NN_INVALID_PARAMETER; } - if (returnCode != OH_NN_SUCCESS) { + if (ret != OH_NN_SUCCESS) { LOGE("[Flatten] Build failed, passed invalid param."); - return returnCode; + return ret; } } diff --git a/frameworks/native/neural_network_runtime/ops/floor_builder.cpp b/frameworks/native/neural_network_runtime/ops/floor_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b76845750e1f8fe3e64d2e4706766a3f74ab02c0 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/floor_builder.cpp @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "floor_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; +static const std::string OP_NAME = "Floor"; + +FloorBuilder::FloorBuilder() {} + +FloorBuilder::~FloorBuilder() {} + +OH_NN_ReturnCode FloorBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Floor] Build failed, the floor operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Floor] Build failed, passed invalid input or output index."); + return ret; + } + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Floor] Build failed, passed invalid param index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr FloorBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Floor] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Floor_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(FloorBuilder, OH_NN_OPS_FLOOR); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/floor_builder.h b/frameworks/native/neural_network_runtime/ops/floor_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..afdd51b7a7f71a64e65b49c40e8ffd845a16c741 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/floor_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_FLOOR_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_FLOOR_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class FloorBuilder : public OpsBuilder { +public: + FloorBuilder(); + ~FloorBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_FLOOR_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/fullconnection_builder.cpp b/frameworks/native/neural_network_runtime/ops/fullconnection_builder.cpp index 37d50691210b860a7dd0c7778d1e4556eca663e8..9592ae57e347eaa7783350692e9e0d8767576213 100644 --- a/frameworks/native/neural_network_runtime/ops/fullconnection_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/fullconnection_builder.cpp @@ -21,9 +21,8 @@ namespace OHOS { namespace NeuralNetworkRuntime { namespace Ops { -static constexpr int INPUT_WITH_AXIS = 2; -static constexpr int INPUT_WITHOUT_AXIS = 1; static constexpr int OUTPUT_NUM = 1; +static constexpr int PARAM_NUM = 4; static constexpr int SCALAR_LENGTH = 1; static const std::string OP_NAME = "FullConnection"; @@ -40,12 +39,11 @@ OH_NN_ReturnCode FullConnectionBuilder::SetFullConnectionInput(const std::vector return OH_NN_INVALID_PARAMETER; } size_t allTensorsSize = allTensors.size(); - bool isOverTensorSize = std::any_of(inputsIndex.begin(), inputsIndex.end(), [allTensorsSize](uint32_t index) { - return index >= allTensorsSize; - }); - if (isOverTensorSize) { - LOGE("[FullConnection] SetFullConnectionInput 
failed, the index of inputs is out of range."); - return OH_NN_INVALID_PARAMETER; + for (auto index : inputsIndex) { + if (index >= allTensorsSize) { + LOGE("[FullConnection] SetFullConnectionInput failed, the index of inputs is out of range."); + return OH_NN_INVALID_PARAMETER; + } } m_inputsIndex = inputsIndex; @@ -54,6 +52,55 @@ OH_NN_ReturnCode FullConnectionBuilder::SetFullConnectionInput(const std::vector return OH_NN_SUCCESS; } +OH_NN_ReturnCode FullConnectionBuilder::SetHasBias(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[FullConnection] The hasBias should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[FullConnection] The hasBias should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[FullConnection] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_hasBias = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode FullConnectionBuilder::SetUseAxis(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[FullConnection] The useAxis should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[FullConnection] The useAxis should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[FullConnection] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + bool useAxis = *(static_cast(buffer)); + if (!useAxis && m_useAxis) { + LOGE("[FullConnection] SetAxis but set useAxis false."); + return OH_NN_INVALID_PARAMETER; + } + m_useAxis = useAxis; + return OH_NN_SUCCESS; +} + OH_NN_ReturnCode FullConnectionBuilder::SetFullConnectionActivation(std::shared_ptr tensor) { tensor->IdentifyOpParameter(); @@ -86,27 +133,26 @@ OH_NN_ReturnCode 
FullConnectionBuilder::SetFullConnectionActivation(std::shared_ OH_NN_ReturnCode FullConnectionBuilder::SetAxis(std::shared_ptr tensor) { - if (m_useAxis) { - tensor->IdentifyOpParameter(); - - if (tensor->GetElementCount() != SCALAR_LENGTH) { - LOGE("[FullConnection] SetFullConnectionActivation failed, the axis shoule be a scalar"); - return OH_NN_INVALID_PARAMETER; - } + tensor->IdentifyOpParameter(); - if (tensor->GetDataType() != OH_NN_INT64) { - LOGE("[FullConnection] SetFullConnectionActivation failed, the Axis should be type OH_NN_INT64."); - return OH_NN_INVALID_PARAMETER; - } + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[FullConnection] SetAxis failed, the axis shoule be a scalar"); + return OH_NN_INVALID_PARAMETER; + } - void* buffer = tensor->GetBuffer(); - if (buffer == nullptr) { - LOGE("[FullConnection] SetAxis GetBuffer return nullptr"); - return OH_NN_INVALID_PARAMETER; - } + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[FullConnection] SetAxis failed, the Axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } - m_axis = *static_cast(buffer); + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[FullConnection] SetAxis GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; } + + m_axis = *static_cast(buffer); + m_useAxis = true; return OH_NN_SUCCESS; } @@ -121,34 +167,42 @@ OH_NN_ReturnCode FullConnectionBuilder::Build(const std::vector& param return OH_NN_OPERATION_FORBIDDEN; } - bool useAxis = false; - if (paramsIndex.size() == INPUT_WITH_AXIS) { - useAxis = true; - } else if (paramsIndex.size() != INPUT_WITHOUT_AXIS) { - LOGE("[FullConnection] Build failed, the index of inputs should equal to %d if axis used or %d if not.", - INPUT_WITH_AXIS, INPUT_WITHOUT_AXIS); - return OH_NN_INVALID_PARAMETER; - } - OH_NN_ReturnCode returnCode = SetFullConnectionInput(inputsIndex, outputsIndex, allTensors); if (returnCode != OH_NN_SUCCESS) { LOGE("[FullConnection] Build failed, 
SetFullConnectionInput failed."); return returnCode; } - // Set axis - m_useAxis = useAxis; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[FullConnection] Build failed, passed invalid param index."); + return returnCode; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + if (tensor->GetType() == OH_NN_FULL_CONNECTION_AXIS) { + returnCode = SetAxis(tensor); + break; + } + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; // 参数 tensor switch (tensor->GetType()) { case OH_NN_FULL_CONNECTION_AXIS: - returnCode = SetAxis(tensor); + break; + case OH_NN_FULL_CONNECTION_HAS_BIAS: + returnCode = SetHasBias(tensor); + break; + case OH_NN_FULL_CONNECTION_USE_AXIS: + returnCode = SetUseAxis(tensor); break; case OH_NN_FULL_CONNECTION_ACTIVATIONTYPE: returnCode = SetFullConnectionActivation(tensor); break; default: - LOGE("[FullConnection] Build failed, param invalid, type = %d.", tensor->GetType()); + LOGE("[FullConnection] Build failed, param invalid, type = %{public}d.", tensor->GetType()); return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/fullconnection_builder.h b/frameworks/native/neural_network_runtime/ops/fullconnection_builder.h index d775f9a03fd64cd47171d061a2efd9e193fc7dcf..fc97ac2c47ac7dabdfdcfeed93a6e1e0cce7d41b 100644 --- a/frameworks/native/neural_network_runtime/ops/fullconnection_builder.h +++ b/frameworks/native/neural_network_runtime/ops/fullconnection_builder.h @@ -39,14 +39,16 @@ private: OH_NN_ReturnCode SetFullConnectionInput(const std::vector& inputsIndex, const std::vector& outputsIndex, const std::vector>& allTensors); + OH_NN_ReturnCode SetHasBias(std::shared_ptr tensor); + OH_NN_ReturnCode SetUseAxis(std::shared_ptr tensor); OH_NN_ReturnCode SetFullConnectionActivation(std::shared_ptr tensor); OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); private: - bool 
m_hasBias{true}; - bool m_useAxis{false}; - int64_t m_axis{0}; - mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + bool m_hasBias {false}; + bool m_useAxis {false}; + int64_t m_axis {0}; + mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/gather_builder.cpp b/frameworks/native/neural_network_runtime/ops/gather_builder.cpp index da10010ac8fea47d6ab19d51837f793d20f5895b..113fa2b802c4e93c3ef3b2d8e5537bcb7920713d 100644 --- a/frameworks/native/neural_network_runtime/ops/gather_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/gather_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 3; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Gather"; GatherBuilder::GatherBuilder() {} @@ -46,9 +47,10 @@ OH_NN_ReturnCode GatherBuilder::Build(const std::vector& paramsIndex, return returnCode; } - if (!paramsIndex.empty()) { - LOGW("[Gather] Build failed, gather expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Gather] Build failed, the param index of Gather operation is invalid."); + return returnCode; } m_inputsIndex = inputsIndex; diff --git a/frameworks/native/neural_network_runtime/ops/gelu_builder.cpp b/frameworks/native/neural_network_runtime/ops/gelu_builder.cpp index b175b6311a09c50c87fca6e494d6940bdba9c065..d4b800abbfea70d43b703b20bf60e272acc98555 100644 --- a/frameworks/native/neural_network_runtime/ops/gelu_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/gelu_builder.cpp @@ -24,12 +24,36 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUMS 
= 1; static const int OUTPUT_NUMS = 1; +static const int PARAM_MAX_NUM = 1; +static const int SCALAR_LENGTH = 1; static const std::string OP_NAME = "Gelu"; GeluBuilder::GeluBuilder() {} GeluBuilder::~GeluBuilder() {} +OH_NN_ReturnCode GeluBuilder::SetApproximate(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[GeLU] The approximate should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[GeLU] The approximate should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[GeLU] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_approximate = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + OH_NN_ReturnCode GeluBuilder::Build(const std::vector& paramsIndex, const std::vector& inputsIndex, const std::vector& outputsIndex, @@ -46,9 +70,27 @@ OH_NN_ReturnCode GeluBuilder::Build(const std::vector& paramsIndex, return returnCode; } - if (!paramsIndex.empty()) { - LOGW("[Gelu] Build failed, gelu expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Gelu] Build failed, passed invalid param indices."); + return returnCode; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_GELU_APPROXIMATE: + returnCode = SetApproximate(tensor); + break; + default: + LOGE("[Gelu] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Gelu] Build failed, passed invalid param."); + return returnCode; + } } m_inputsIndex = inputsIndex; @@ -70,9 +112,8 @@ LiteGraphPrimitvePtr GeluBuilder::GetPrimitive() float alpha = 0.0f; float minVal = 0.0f; 
float maxVal = 0.0f; - bool approximate = false; void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, - alpha, minVal, maxVal, approximate); + alpha, minVal, maxVal, m_approximate); LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); return graphPrimitivePtr; } diff --git a/frameworks/native/neural_network_runtime/ops/gelu_builder.h b/frameworks/native/neural_network_runtime/ops/gelu_builder.h index ca088dea2d79f13ea136344a3c379a4216b0ee92..0a590dcc99fa04844f6b8c1c8cf223cde506b3c9 100644 --- a/frameworks/native/neural_network_runtime/ops/gelu_builder.h +++ b/frameworks/native/neural_network_runtime/ops/gelu_builder.h @@ -30,6 +30,12 @@ public: const std::vector& outputsIndex, const std::vector>& allTensors) override; LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetApproximate(std::shared_ptr tensor); + +private: + bool m_approximate {false}; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/greater_builder.cpp b/frameworks/native/neural_network_runtime/ops/greater_builder.cpp index abe373be05d70cf3c99e6c5152af4e6cffd9cce2..1dd18b6e0cac4af4b0ea05cc64a83c5f8903aad0 100644 --- a/frameworks/native/neural_network_runtime/ops/greater_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/greater_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUMS = 2; static const int OUTPUT_NUMS = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Greater"; GreaterBuilder::GreaterBuilder() {} @@ -46,9 +47,10 @@ OH_NN_ReturnCode GreaterBuilder::Build(const std::vector& paramsIndex, return returnCode; } - if (!paramsIndex.empty()) { - LOGW("[Greater] Greater expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) 
{ + LOGE("[Greater] Build failed, passed invalid param indices."); + return returnCode; } m_inputsIndex = inputsIndex; diff --git a/frameworks/native/neural_network_runtime/ops/greater_equal_builder.cpp b/frameworks/native/neural_network_runtime/ops/greater_equal_builder.cpp index ea1e107daa83c7da99497755912973aaef2d8105..94da656cf58f81ad3bb54ff79070b0d2e7f206bf 100644 --- a/frameworks/native/neural_network_runtime/ops/greater_equal_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/greater_equal_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUMS = 2; static const int OUTPUT_NUMS = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "GreaterEqual"; GreaterEqualBuilder::GreaterEqualBuilder() {} @@ -46,9 +47,10 @@ OH_NN_ReturnCode GreaterEqualBuilder::Build(const std::vector& paramsI return returnCode; } - if (!paramsIndex.empty()) { - LOGW("[GreaterEqual] GreaterEqual expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[GreaterEqual] Build failed, passed invalid param indices."); + return returnCode; } m_inputsIndex = inputsIndex; diff --git a/frameworks/native/neural_network_runtime/ops/hswish_builder.cpp b/frameworks/native/neural_network_runtime/ops/hswish_builder.cpp index bb5796c884df52bb9bc3b1e341615175c1a880ae..8098bb4857a0938d615cac2f9f08d26d0770343b 100644 --- a/frameworks/native/neural_network_runtime/ops/hswish_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/hswish_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUMS = 1; static const int OUTPUT_NUMS = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Hswish"; HswishBuilder::HswishBuilder() {} @@ -46,9 +47,10 @@ OH_NN_ReturnCode HswishBuilder::Build(const std::vector&
paramsIndex, return returnCode; } - if (!paramsIndex.empty()) { - LOGW("[Hswish] Build failed, hswish expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Hswish] Build failed, passed invalid param indices."); + return returnCode; } m_inputsIndex = inputsIndex; diff --git a/frameworks/native/neural_network_runtime/ops/instance_norm_builder.cpp b/frameworks/native/neural_network_runtime/ops/instance_norm_builder.cpp index 13ad995c0ebc368b9cd0e76470aba14d221ec6ee..c6a6e43b5a381b4ec56376cdf55aa907f588e1c7 100755 --- a/frameworks/native/neural_network_runtime/ops/instance_norm_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/instance_norm_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 3; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const int SCALAR_LENGTH = 1; static const std::string OP_NAME = "InstanceNorm"; @@ -67,23 +68,28 @@ OH_NN_ReturnCode InstanceNormBuilder::Build(const std::vector& paramsI m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; - - OH_NN_ReturnCode returnCode; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[InstanceNorm] Build failed, passed invalid param index."); + return ret; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); switch (tensor->GetType()) { case OH_NN_INSTANCE_NORM_EPSILON: - returnCode = SetEpsilon(tensor); + ret = SetEpsilon(tensor); break; default: LOGE("[InstanceNorm] Build failed, param invalid, type=%d", tensor->GetType()); return OH_NN_INVALID_PARAMETER; } - if (returnCode != OH_NN_SUCCESS) { + if (ret != OH_NN_SUCCESS) { LOGE("[InstanceNorm] Build failed, passed invalid param."); - return returnCode; + return ret; } } diff --git 
a/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.cpp b/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cc3b74067ba78842810799b0c87c375b4cf60e67 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.cpp @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "l2_normalize_builder.h" + +#include "transform.h" +#include "validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 3; +static const int SCALAR_LENGTH = 1; +static const std::string OP_NAME = "L2Normalize"; + +L2NormalizeBuilder::L2NormalizeBuilder() {} + +L2NormalizeBuilder::~L2NormalizeBuilder() {} + +OH_NN_ReturnCode L2NormalizeBuilder::SetAxis(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[L2Normalize] The axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + m_axis.clear(); + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[L2Normalize] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + int64_t* pAxis = static_cast(buffer); + + uint32_t elementCount = tensor->GetElementCount(); + for (uint32_t i = 0; i < elementCount; ++i) { + m_axis.emplace_back(*pAxis); 
+ ++pAxis; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode L2NormalizeBuilder::SetEpsilon(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[L2Normalize] The epsilon should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[L2Normalize] The epsilon should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[L2Normalize] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_epsilon = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode L2NormalizeBuilder::SetActivationType(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[L2Normalize] SetActivationType failed, the activationType should have type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[L2Normalize] SetActivationType failed, the activationType should be a scalar"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[L2Normalize] SetActivationType GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + int8_t* pActivationType = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast(*pActivationType))) { + LOGE("[L2Normalize] SetActivationType failed, activationType input is invalid."); + return OH_NN_INVALID_PARAMETER; + } + m_activationType = NNToMS::TransfromFusionType((OH_NN_FuseType)(*pActivationType)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode L2NormalizeBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[L2Normalize] Build failed, the l2Normalize operation has been built. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[L2Normalize] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[L2Normalize] Build failed, passed invalid param index."); + return ret; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_L2_NORMALIZE_AXIS: + ret = SetAxis(tensor); + break; + case OH_NN_L2_NORMALIZE_EPSILON: + ret = SetEpsilon(tensor); + break; + case OH_NN_L2_NORMALIZE_ACTIVATION_TYPE: + ret = SetActivationType(tensor); + break; + default: + LOGE("[L2Normalize] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (ret != OH_NN_SUCCESS) { + LOGE("[L2Normalize] Build failed, passed invalid param."); + return ret; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr L2NormalizeBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[L2Normalize] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_L2NormalizeFusion_CreatePrimitive(m_axis, m_epsilon, m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(L2NormalizeBuilder, OH_NN_OPS_L2_NORMALIZE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.h b/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.h new file mode 100644 index 
0000000000000000000000000000000000000000..aa4f69dafd42783c75a623e18742f60cd6e40906 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_L2_NORMALIZE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_L2_NORMALIZE_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class L2NormalizeBuilder : public OpsBuilder { +public: + L2NormalizeBuilder(); + ~L2NormalizeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetEpsilon(std::shared_ptr tensor); + OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + +private: + std::vector m_axis; + float m_epsilon {1e-6}; + mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_L2_NORMALIZE_BUILDER_H diff --git a/frameworks/native/neural_network_runtime/ops/layernorm_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/layernorm_builder.cpp index 0e9b2ef776122d97ed9fc76d57d8df2d84a6679b..cfb529721e5d842b964c8f05c34c3080d6c91ca7 100644 --- a/frameworks/native/neural_network_runtime/ops/layernorm_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/layernorm_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 3; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 4; static const int INPUT_X = 0; static const int INPUT_GAMMA = 1; static const int INPUT_BETA = 2; @@ -36,8 +37,8 @@ LayerNormBuilder::~LayerNormBuilder() {} OH_NN_ReturnCode LayerNormBuilder::SetBeginNormAxis(std::shared_ptr tensor) { tensor->IdentifyOpParameter(); - if (tensor->GetDataType() != OH_NN_INT32) { - LOGE("[LayerNormBuilder] SetBeginNormAxis failed. The has_bias should be type OH_NN_INT32."); + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[LayerNormBuilder] SetBeginNormAxis failed. The has_bias should be type OH_NN_INT64."); return OH_NN_INVALID_PARAMETER; } @@ -52,7 +53,7 @@ OH_NN_ReturnCode LayerNormBuilder::SetBeginNormAxis(std::shared_ptr te return OH_NN_INVALID_PARAMETER; } - m_beginNormAxis = *static_cast(buffer); + m_beginNormAxis = *static_cast(buffer); return OH_NN_SUCCESS; } @@ -82,8 +83,8 @@ OH_NN_ReturnCode LayerNormBuilder::SetEpsilon(std::shared_ptr tensor) OH_NN_ReturnCode LayerNormBuilder::SetBeginParamsAxis(std::shared_ptr tensor) { tensor->IdentifyOpParameter(); - if (tensor->GetDataType() != OH_NN_INT32) { - LOGE("[LayerNormBuilder] SetBeginParamsAxis failed. The has_bias should be type OH_NN_INT32."); + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[LayerNormBuilder] SetBeginParamsAxis failed. 
The has_bias should be type OH_NN_INT64."); return OH_NN_INVALID_PARAMETER; } @@ -98,7 +99,7 @@ OH_NN_ReturnCode LayerNormBuilder::SetBeginParamsAxis(std::shared_ptr return OH_NN_INVALID_PARAMETER; } - m_beginParamsAxis = *static_cast(buffer); + m_beginParamsAxis = *static_cast(buffer); return OH_NN_SUCCESS; } @@ -121,6 +122,12 @@ OH_NN_ReturnCode LayerNormBuilder::Build(const std::vector& paramsInde m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[LayerNormBuilder] Build failed. Passed invalid param index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { @@ -145,7 +152,7 @@ OH_NN_ReturnCode LayerNormBuilder::Build(const std::vector& paramsInde } auto inputShape = allTensors[inputsIndex[INPUT_X]]->GetDimensions(); - int inputShapeSize = static_cast(inputShape.size()); + int64_t inputShapeSize = static_cast(inputShape.size()); // beginNormAxis must great than 1, because normal shape cannot equal input shape. 
if (m_beginNormAxis >= inputShapeSize || m_beginNormAxis < 1) { LOGE("[LayerNormBuilder] Build failed, invalid beginNormAxis value, it should be [1, rank(input))."); @@ -179,12 +186,12 @@ LiteGraphPrimitvePtr LayerNormBuilder::GetPrimitive() } OH_NN_ReturnCode LayerNormBuilder::ValidateGammaAndBetaShape(const std::vector& inputsIndex, - int beginAxis, const std::vector>& allTensors) const + int64_t beginAxis, const std::vector>& allTensors) const { auto inputShape = allTensors[inputsIndex[INPUT_X]]->GetDimensions(); auto gammaShape = allTensors[inputsIndex[INPUT_GAMMA]]->GetDimensions(); auto betaShape = allTensors[inputsIndex[INPUT_BETA]]->GetDimensions(); - int inputShapeSize = static_cast(inputShape.size()); + int64_t inputShapeSize = static_cast(inputShape.size()); if (gammaShape.size() != static_cast(inputShapeSize - beginAxis)) { LOGE("[LayerNormBuilder] Invalid gamma dimension, gamma dimension should be equal to normalized dimension."); return OH_NN_INVALID_PARAMETER; diff --git a/frameworks/native/neural_network_runtime/ops/layernorm_builder.h b/frameworks/native/neural_network_runtime/ops/layernorm_builder.h index fd0e5bcc790c5e6e2a7e47a04c4b08d0c8976afc..c7cf07c31bddc3ab9bcbf50922204ad6b46bc23e 100644 --- a/frameworks/native/neural_network_runtime/ops/layernorm_builder.h +++ b/frameworks/native/neural_network_runtime/ops/layernorm_builder.h @@ -36,13 +36,13 @@ private: OH_NN_ReturnCode SetEpsilon(std::shared_ptr tensor); OH_NN_ReturnCode SetBeginParamsAxis(std::shared_ptr tensor); OH_NN_ReturnCode ValidateGammaAndBetaShape(const std::vector& inputsIndex, - int beginAxis, const std::vector>& allTensors) const; + int64_t beginAxis, const std::vector>& allTensors) const; private: - int m_beginNormAxis{1}; - float m_epsilon{1e-7}; - bool m_elementwiseAffine{false}; - int m_beginParamsAxis{1}; + int64_t m_beginNormAxis {0}; + float m_epsilon {0.00001f}; + bool m_elementwiseAffine {false}; + int64_t m_beginParamsAxis {0}; }; } // namespace Ops } // namespace 
NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp index eeab51316950fb3163e1d22c35653f711872d334..11fc65cac7ca5513b18021077c5ed01082c3d647 100644 --- a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const int SCALAR_LENGTH = 1; static const std::string OP_NAME = "LeakyRelu"; @@ -67,23 +68,28 @@ OH_NN_ReturnCode LeakyReluBuilder::Build(const std::vector& paramsInde m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; - - OH_NN_ReturnCode returnCode; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[LeakyRelu] Build failed, passed invalid param index."); + return ret; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); switch (tensor->GetType()) { case OH_NN_LEAKY_RELU_NEGATIVE_SLOPE: - returnCode = SetNegativeSlope(tensor); + ret = SetNegativeSlope(tensor); break; default: LOGE("[LeakyRelu] Build failed, param invalid, type=%d", tensor->GetType()); return OH_NN_INVALID_PARAMETER; } - if (returnCode != OH_NN_SUCCESS) { + if (ret != OH_NN_SUCCESS) { LOGE("[LeakyRelu] Build failed, passed invalid param."); - return returnCode; + return ret; } } diff --git a/frameworks/native/neural_network_runtime/ops/less_builder.cpp b/frameworks/native/neural_network_runtime/ops/less_builder.cpp index 9da2189851d453ac202476f79ffef759238998f7..9b76a5e09e1b9920dd3577f08c12e8eccc517ea7 100755 --- a/frameworks/native/neural_network_runtime/ops/less_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/less_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace 
Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Less"; LessBuilder::LessBuilder() {} @@ -41,15 +42,16 @@ OH_NN_ReturnCode LessBuilder::Build(const std::vector& paramsIndex, LOGE("[Less] Build failed, passed invalid input or output index."); return ret; } - - if (!paramsIndex.empty()) { - LOGW("[Less] Build failed, the less expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Less] Build failed, passed invalid param index."); + return ret; + } + m_name = OP_NAME; m_isBuild = true; return OH_NN_SUCCESS; diff --git a/frameworks/native/neural_network_runtime/ops/lessequal_builder.cpp b/frameworks/native/neural_network_runtime/ops/lessequal_builder.cpp index d0ef5fd836437fc3be08146f2f3907886b451744..8c3250109b831c621ffb71d8e77fa20c98c316c5 100644 --- a/frameworks/native/neural_network_runtime/ops/lessequal_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/lessequal_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUMS = 2; static const int OUTPUT_NUMS = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "LessEqual"; LessEqualBuilder::LessEqualBuilder() {} @@ -46,9 +47,10 @@ OH_NN_ReturnCode LessEqualBuilder::Build(const std::vector& paramsInde return returnCode; } - if (!paramsIndex.empty()) { - LOGW("[LessEqual] LessEqual expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[LessEqual] Build failed, passed invalid param indices."); + return returnCode; } m_inputsIndex = inputsIndex; diff --git 
a/frameworks/native/neural_network_runtime/ops/log_builder.cpp b/frameworks/native/neural_network_runtime/ops/log_builder.cpp index b92652655f1881e5c92dc861579f86c2f58829f7..c90bf7444647ac9b4c6d3252b3cace3998880c6a 100644 --- a/frameworks/native/neural_network_runtime/ops/log_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/log_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Log"; LogBuilder::LogBuilder() {} @@ -41,10 +42,11 @@ OH_NN_ReturnCode LogBuilder::Build(const std::vector& paramsIndex, LOGE("[Log] Build failed, passed invalid input or output index."); return ret; } - - if (!paramsIndex.empty()) { - LOGW("[Log] Build failed, the log expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Log] Build failed, passed invalid param index."); + return ret; } m_inputsIndex = inputsIndex; diff --git a/frameworks/native/neural_network_runtime/ops/log_softmax_builder.cpp b/frameworks/native/neural_network_runtime/ops/log_softmax_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8c9b090cc38c908c49c92abc55c774a33d8877c6 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/log_softmax_builder.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "log_softmax_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; +static const int SCALAR_LENGTH = 1; +static const std::string OP_NAME = "LogSoftmax"; + +LogSoftmaxBuilder::LogSoftmaxBuilder() {} + +LogSoftmaxBuilder::~LogSoftmaxBuilder() {} + +OH_NN_ReturnCode LogSoftmaxBuilder::SetAxis(std::shared_ptr& tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[LogSoftmax] The axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[LogSoftmax] The axis should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[LogSoftmax] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_axis = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode LogSoftmaxBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[LogSoftmax] Build failed, the logSoftmax operation has been built. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[LogSoftmax] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[LogSoftmax] Build failed, passed invalid param index."); + return ret; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_LOG_SOFTMAX_AXIS: + ret = SetAxis(tensor); + break; + default: + LOGE("[LogSoftmax] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (ret != OH_NN_SUCCESS) { + LOGE("[LogSoftmax] Build failed, passed invalid param."); + return ret; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr LogSoftmaxBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[LogSoftmax] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_LogSoftmax_CreatePrimitive(m_axis); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(LogSoftmaxBuilder, OH_NN_OPS_LOG_SOFTMAX); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/log_softmax_builder.h b/frameworks/native/neural_network_runtime/ops/log_softmax_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..f8af2da37f90d44b85d74f96b0894cf7c6a227a1 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/log_softmax_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2024 Huawei Device Co., 
Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_LOG_SOFTMAX_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_LOG_SOFTMAX_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class LogSoftmaxBuilder : public OpsBuilder { +public: + LogSoftmaxBuilder(); + ~LogSoftmaxBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr& tensor); + +private: + int64_t m_axis {0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_LOG_SOFTMAX_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/logical_and_builder.cpp b/frameworks/native/neural_network_runtime/ops/logical_and_builder.cpp index 6ce24c739724ce17a00c7893d3be72ebdadd3cba..dae6f4173fe4e1a92492dcf646188424d7318072 100644 --- a/frameworks/native/neural_network_runtime/ops/logical_and_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/logical_and_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; 
static const std::string OP_NAME = "LogicalAnd"; LogicalAndBuilder::LogicalAndBuilder() {} @@ -41,10 +42,11 @@ OH_NN_ReturnCode LogicalAndBuilder::Build(const std::vector& paramsInd LOGE("[LogicalAnd] Build failed, passed invalid input or output index."); return ret; } - - if (!paramsIndex.empty()) { - LOGW("[LogicalAnd] Build failed, the logicalAnd expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[LogicalAnd] Build failed, passed invalid param index."); + return ret; } m_inputsIndex = inputsIndex; diff --git a/frameworks/native/neural_network_runtime/ops/logical_not_builder.cpp b/frameworks/native/neural_network_runtime/ops/logical_not_builder.cpp index 05b4aed79d90dc2ccc8fc68b2f7be9d22726439d..645a2bbc8d14c075d657ae6244274bcd33f4b380 100644 --- a/frameworks/native/neural_network_runtime/ops/logical_not_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/logical_not_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "LogicalNot"; LogicalNotBuilder::LogicalNotBuilder() {} @@ -41,15 +42,16 @@ OH_NN_ReturnCode LogicalNotBuilder::Build(const std::vector& paramsInd LOGE("[LogicalNot] Build failed, passed invalid input or output index."); return ret; } - - if (!paramsIndex.empty()) { - LOGW("[LogicalNot] Build failed, the logicalNot expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[LogicalNot] Build failed, passed invalid param index."); + return ret; + } + m_name = OP_NAME; m_isBuild = true; return OH_NN_SUCCESS; diff --git 
a/frameworks/native/neural_network_runtime/ops/logical_or_builder.cpp b/frameworks/native/neural_network_runtime/ops/logical_or_builder.cpp index 3f1d5c7717605ce3af5c2499255cfad9141c363a..b3b61903d7dce5a19553aa68448cd133fa9da086 100644 --- a/frameworks/native/neural_network_runtime/ops/logical_or_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/logical_or_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "LogicalOr"; LogicalOrBuilder::LogicalOrBuilder() {} @@ -41,15 +42,16 @@ OH_NN_ReturnCode LogicalOrBuilder::Build(const std::vector& paramsInde LOGE("[LogicalOr] Build failed, passed invalid input or output index."); return ret; } - - if (!paramsIndex.empty()) { - LOGW("[LogicalOr] Build failed, the logicalOr expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[LogicalOr] Build failed, passed invalid param index."); + return ret; + } + m_name = OP_NAME; m_isBuild = true; return OH_NN_SUCCESS; diff --git a/frameworks/native/neural_network_runtime/ops/lrn_builder.cpp b/frameworks/native/neural_network_runtime/ops/lrn_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1517b1e49ea739378b6295e3e2c2ca8c96f073be --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/lrn_builder.cpp @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "lrn_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 5; +static const int SCALAR_LENGTH = 1; +static const std::string OP_NAME = "LRN"; +static const std::unordered_map normRegionList = {{0, "ACROSS_CHANNELS"}}; + +LRNBuilder::LRNBuilder() {} + +LRNBuilder::~LRNBuilder() {} + +OH_NN_ReturnCode LRNBuilder::SetDepthRadius(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[LRN] The depthRadius should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[LRN] The depthRadius should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[LRN] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_depthRadius = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode LRNBuilder::SetBias(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[LRN] The bias should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[LRN] The bias should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[LRN] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_bias = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + 
+OH_NN_ReturnCode LRNBuilder::SetAlpha(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[LRN] The alpha should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[LRN] The alpha should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[LRN] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_alpha = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode LRNBuilder::SetBeta(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[LRN] The beta should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[LRN] The beta should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[LRN] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_beta = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode LRNBuilder::SetNormRegion(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT32) { + LOGE("[LRN] The normRegion should be type OH_NN_INT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[LRN] The beta should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[LRN] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + int normRegionKey = *(static_cast(buffer)); + auto it = normRegionList.find(normRegionKey); + if (it != normRegionList.end()) { + m_normRegion = it->second; + } else { + LOGE("[LRN] The normRegion should between [0, 0], but get %d.", normRegionKey); + LOGE("[LRN] normRegion value: 0-ACROSS_CHANNELS"); + return OH_NN_INVALID_PARAMETER; + } + + return OH_NN_SUCCESS; +} + 
+OH_NN_ReturnCode LRNBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[LRN] Build failed, the LRN operation has been build. cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[LRN] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[LRN] Build failed, passed invalid input or output index."); + return ret; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_LRN_DEPTH_RADIUS: + ret = SetDepthRadius(tensor); + break; + case OH_NN_LRN_BIAS: + ret = SetBias(tensor); + break; + case OH_NN_LRN_ALPHA: + ret = SetAlpha(tensor); + break; + case OH_NN_LRN_BETA: + ret = SetBeta(tensor); + break; + case OH_NN_LRN_NORM_REGION: + ret = SetNormRegion(tensor); + break; + default: + LOGE("[LRN] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (ret != OH_NN_SUCCESS) { + LOGE("[LRN] Build failed, passed invalid param."); + return ret; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr LRNBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[LRN] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_LRN_CreatePrimitive(m_depthRadius, m_bias, m_alpha, + m_beta, m_normRegion); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(LRNBuilder, 
OH_NN_OPS_LRN); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/lrn_builder.h b/frameworks/native/neural_network_runtime/ops/lrn_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..7831e354627f35d3213094708bfd3e9753ab038d --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/lrn_builder.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_LRN_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_LRN_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class LRNBuilder : public OpsBuilder { +public: + LRNBuilder(); + ~LRNBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetDepthRadius(std::shared_ptr tensor); + OH_NN_ReturnCode SetBias(std::shared_ptr tensor); + OH_NN_ReturnCode SetAlpha(std::shared_ptr tensor); + OH_NN_ReturnCode SetBeta(std::shared_ptr tensor); + OH_NN_ReturnCode SetNormRegion(std::shared_ptr tensor); + +private: + int64_t m_depthRadius {0}; + float m_bias {0.0f}; + float m_alpha {0.0f}; + float m_beta {0.0f}; + std::string m_normRegion {"ACROSS_CHANNELS"}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_LRN_BUILDER_H diff --git a/frameworks/native/neural_network_runtime/ops/lstm_builder.cpp b/frameworks/native/neural_network_runtime/ops/lstm_builder.cpp index 7fe42cf573fbd741f1940ae2a2be336d69bbc71a..5273ea82cc524ce8396db54610eafb64bae98fbf 100644 --- a/frameworks/native/neural_network_runtime/ops/lstm_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/lstm_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 6; static const int OUTPUT_NUM = 3; +static const int PARAM_MAX_NUM = 10; static const int SCALAR_LENGTH = 1; static const std::string OP_NAME = "LSTM"; @@ -316,6 +317,12 @@ OH_NN_ReturnCode LSTMBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { 
+ LOGE("[LSTM] Build failed, passed invalid param index."); + return ret; + } + ret = ParseParam(paramsIndex, allTensors); if (ret != OH_NN_SUCCESS) { LOGE("[LSTM] ParseParam failed, passed invalid param."); diff --git a/frameworks/native/neural_network_runtime/ops/matmul_builder.cpp b/frameworks/native/neural_network_runtime/ops/matmul_builder.cpp index baa13a362913d5b1b71c1f7c4e2641eb6fe5e282..5913934bb3f6e8e24474361cae9cf15474049cf5 100644 --- a/frameworks/native/neural_network_runtime/ops/matmul_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/matmul_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 3; static const int SCALE_LENGTH = 1; static const std::string OP_NAME = "Matmul"; @@ -126,6 +127,12 @@ OH_NN_ReturnCode MatmulBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Matmul] Matmul Build failed. 
Passed invalid param indices."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { diff --git a/frameworks/native/neural_network_runtime/ops/maximum_builder.cpp b/frameworks/native/neural_network_runtime/ops/maximum_builder.cpp index a6e97e86c263f2656a3021650683e25b4d751cbb..9ade9eddfd3c682d0cae54e5888bb83b04a4c3bd 100644 --- a/frameworks/native/neural_network_runtime/ops/maximum_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/maximum_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Maximum"; MaximumBuilder::MaximumBuilder() {} @@ -46,14 +47,15 @@ OH_NN_ReturnCode MaximumBuilder::Build(const std::vector& paramsIndex, return ret; } - if (!paramsIndex.empty()) { - LOGW("[Maximum] Maximum Build failed. Maximum expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } - m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Maximum] Maximum Build failed. The param index of Maximum operation is invalid."); + return ret; + } + m_isBuild = true; m_name = OP_NAME; return OH_NN_SUCCESS; diff --git a/frameworks/native/neural_network_runtime/ops/minimum_builder.cpp b/frameworks/native/neural_network_runtime/ops/minimum_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d9a4bc25a7e0953f20dda72b717da1c82c7ceda2 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/minimum_builder.cpp @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "minimum_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; +static const std::string OP_NAME = "Minimum"; + +MinimumBuilder::MinimumBuilder() {} + +MinimumBuilder::~MinimumBuilder() {} + +OH_NN_ReturnCode MinimumBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Minimum] Build failed, the minimum operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Minimum] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Minimum] Build failed, passed invalid param index."); + return ret; + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr MinimumBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Minimum] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Minimum_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(MinimumBuilder, OH_NN_OPS_MINIMUM); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/minimum_builder.h b/frameworks/native/neural_network_runtime/ops/minimum_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..c23683a6510657766230669b63f0eec99da0bc19 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/minimum_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_MINIMUM_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_MINIMUM_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class MinimumBuilder : public OpsBuilder { +public: + MinimumBuilder(); + ~MinimumBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_MINIMUM_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/mod_builder.cpp b/frameworks/native/neural_network_runtime/ops/mod_builder.cpp index 61bd0234ee03489aa8a6db3d9d18f5ec0d4713f9..b0d9245dd4d50e1c3babbf00d11b5769420fe23f 100644 --- a/frameworks/native/neural_network_runtime/ops/mod_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/mod_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Mod"; ModBuilder::ModBuilder() {} @@ -41,15 +42,16 @@ OH_NN_ReturnCode ModBuilder::Build(const std::vector& paramsIndex, LOGE("[Mod] Build failed, passed invalid input or output index."); return ret; } - - if (!paramsIndex.empty()) { - LOGW("[Mod] Build failed, the mod expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Mod] Build failed, passed invalid param index."); + 
return ret; + } + m_name = OP_NAME; m_isBuild = true; return OH_NN_SUCCESS; diff --git a/frameworks/native/neural_network_runtime/ops/mul_builder.cpp b/frameworks/native/neural_network_runtime/ops/mul_builder.cpp index 01b438aaab6e14e697ff5d6c42095264ccec625a..ff9290617dc4621a8db3bf71b69ecc39bcb8bd42 100644 --- a/frameworks/native/neural_network_runtime/ops/mul_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/mul_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const int SCALE_LENGTH = 1; static const std::string OP_NAME = "Mul"; @@ -80,6 +81,12 @@ OH_NN_ReturnCode MulBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Mul] Mul build failed. Passed invalid param index of Mul operation index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { diff --git a/frameworks/native/neural_network_runtime/ops/mul_builder.h b/frameworks/native/neural_network_runtime/ops/mul_builder.h index 62735cca1ea18f703e30ee5673a3ed2797f56f6a..62c0b240eda723d38169f446e0b995ffe3d684cc 100644 --- a/frameworks/native/neural_network_runtime/ops/mul_builder.h +++ b/frameworks/native/neural_network_runtime/ops/mul_builder.h @@ -37,7 +37,7 @@ private: OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); private: - mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/neg_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/neg_builder.cpp index ae6834b7eef090a17a1ce5198e042745742ea67d..0387351e7c0476d1b23dd5c677c98de24a007d10 100644 --- a/frameworks/native/neural_network_runtime/ops/neg_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/neg_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Neg"; NegBuilder::NegBuilder() {} @@ -41,15 +42,16 @@ OH_NN_ReturnCode NegBuilder::Build(const std::vector& paramsIndex, LOGE("[Neg] Build failed, passed invalid input or output index."); return ret; } - - if (!paramsIndex.empty()) { - LOGW("[Neg] Build failed, the neg expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Neg] Build failed, passed invalid param index."); + return ret; + } + m_name = OP_NAME; m_isBuild = true; return OH_NN_SUCCESS; diff --git a/frameworks/native/neural_network_runtime/ops/notequal_builder.cpp b/frameworks/native/neural_network_runtime/ops/notequal_builder.cpp index b889bce24c339cb3d6db1254173d9229ec1f985e..01937c93101e29803afa4a9f77791a3ba7681211 100644 --- a/frameworks/native/neural_network_runtime/ops/notequal_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/notequal_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "NotEqual"; NotEqualBuilder::NotEqualBuilder() {} @@ -41,15 +42,16 @@ OH_NN_ReturnCode NotEqualBuilder::Build(const std::vector& paramsIndex LOGE("[NotEqual] Build failed, passed invalid input or output index."); return ret; } - - if (!paramsIndex.empty()) { - 
LOGW("[NotEqual] Build failed, the notEqual expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[NotEqual] Build failed, passed invalid param index."); + return ret; + } + m_name = OP_NAME; m_isBuild = true; return OH_NN_SUCCESS; diff --git a/frameworks/native/neural_network_runtime/ops/onehot_builder.cpp b/frameworks/native/neural_network_runtime/ops/onehot_builder.cpp index 2d8bcdb0f5e8eb9c89239603d281cf9055e508e6..88474d50f193259446bb5344264dd4c415bc0cb1 100644 --- a/frameworks/native/neural_network_runtime/ops/onehot_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/onehot_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 4; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const std::string OP_NAME = "Onehot"; OnehotBuilder::OnehotBuilder() {} @@ -67,6 +68,12 @@ OH_NN_ReturnCode OnehotBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Onehot] Onehot build failed. 
Passed invalid param index of Onehot operation index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { diff --git a/frameworks/native/neural_network_runtime/ops/onehot_builder.h b/frameworks/native/neural_network_runtime/ops/onehot_builder.h index 918185fdd8881f2807d83c440a40e397f4832b35..9364d790be566eed56758af3fccb114ce0f2ccc6 100644 --- a/frameworks/native/neural_network_runtime/ops/onehot_builder.h +++ b/frameworks/native/neural_network_runtime/ops/onehot_builder.h @@ -35,7 +35,7 @@ private: OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); private: - int64_t m_axis{-1}; + int64_t m_axis {0}; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/pad_builder.cpp b/frameworks/native/neural_network_runtime/ops/pad_builder.cpp index a2259f9db3ef9197b4316dcef40e4d442e1a711a..ea79fd9d40d322f78bb82d6c67abd57a001a2834 100644 --- a/frameworks/native/neural_network_runtime/ops/pad_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/pad_builder.cpp @@ -15,22 +15,61 @@ #include "pad_builder.h" -#include "mindir.h" - #include "ops_registry.h" +#include "transform.h" +#include "validation.h" namespace OHOS { namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 2; static const int SCALE_LENGTH = 1; static const std::string OP_NAME = "Pad"; +static const std::unordered_map paddingList = + {{0, mindspore::lite::PADDING_MODE_CONSTANT}, + {1, mindspore::lite::PADDING_MODE_REFLECT}, + {2, mindspore::lite::PADDING_MODE_SYMMETRIC}, + {3, mindspore::lite::PADDING_MODE_RESERVED}}; PadBuilder::PadBuilder() {} PadBuilder::~PadBuilder() {} +OH_NN_ReturnCode PadBuilder::SetPaddingMode(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[Pad] SetPaddingMode failed, the paddingMode 
shoule be a scalar"); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT32) { + LOGE("[Pad] SetPaddingMode failed, the paddingMode should be type OH_NN_INT32."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Pad] SetPaddingMode GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + int paddingModeKey = *(static_cast(buffer)); + auto it = paddingList.find(paddingModeKey); + if (it != paddingList.end()) { + m_paddingMode = it->second; + } else { + LOGE("[DepthToSpace] The padding mode value should between [0, 3], but get %d.", paddingModeKey); + LOGE("[DepthToSpace] paddingMode value:"); + LOGE(" 0-PADDING_MODE_CONSTANT, 1-PADDING_MODE_REFLECT, 2-PADDING_MODE_SYMMETRIC, 3-PADDING_MODE_RESERVED"); + return OH_NN_INVALID_PARAMETER; + } + + return OH_NN_SUCCESS; +} + OH_NN_ReturnCode PadBuilder::SetConstantValue(std::shared_ptr tensor) { tensor->IdentifyOpParameter(); @@ -73,12 +112,21 @@ OH_NN_ReturnCode PadBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Pad] Pad Build failed. 
Passed invalid param index of Pad operation index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { case OH_NN_PAD_CONSTANT_VALUE: returnCode = SetConstantValue(tensor); break; + case OH_NN_PAD_PADDING_MODE: + returnCode = SetPaddingMode(tensor); + break; default: LOGE("[Pad] Parameter Type is invalid, type=%d", tensor->GetType()); return OH_NN_INVALID_PARAMETER; @@ -104,8 +152,9 @@ LiteGraphPrimitvePtr PadBuilder::GetPrimitive() return {nullptr, DestroyLiteGraphPrimitive}; } - mindspore::lite::PaddingMode padding_mode = mindspore::lite::PADDING_MODE_CONSTANT; - void* primitive = MindIR_PadFusion_CreatePrimitive(paddings, padding_mode, m_constantValue); + std::vector> paddings; + + void* primitive = MindIR_PadFusion_CreatePrimitive(paddings, m_paddingMode, m_constantValue); LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); return graphPrimitivePtr; } diff --git a/frameworks/native/neural_network_runtime/ops/pad_builder.h b/frameworks/native/neural_network_runtime/ops/pad_builder.h index 7cc192c6f5d4331d5f567515aacd0444b4fb8c73..661d9efdd7b2e2394466c8d7d7783096f68fa3ac 100644 --- a/frameworks/native/neural_network_runtime/ops/pad_builder.h +++ b/frameworks/native/neural_network_runtime/ops/pad_builder.h @@ -18,6 +18,8 @@ #include "ops_builder.h" +#include "mindir.h" + namespace OHOS { namespace NeuralNetworkRuntime { namespace Ops { @@ -33,10 +35,11 @@ public: private: OH_NN_ReturnCode SetConstantValue(std::shared_ptr tensor); + OH_NN_ReturnCode SetPaddingMode(std::shared_ptr tensor); private: - std::vector> paddings{}; - float m_constantValue{0.0}; + float m_constantValue {0.0f}; + mindspore::lite::PaddingMode m_paddingMode {mindspore::lite::PADDING_MODE_CONSTANT}; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/pooling_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/pooling_builder.cpp index 83214102c14620eaa9e1af3d430b6934ffb2cebf..52005118ca23d9a58c36dcda2bc5e67e785de3e5 100644 --- a/frameworks/native/neural_network_runtime/ops/pooling_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/pooling_builder.cpp @@ -24,9 +24,13 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 8; +static const int SCALAR_LENGTH = 1; static const int NUM_ELEMENT_PAD_MODE = 1; static const int NUM_ELEMENT_PAD_LIST = 4; static const int ACTIVATION_LENGTH = 1; +static const std::unordered_map roundList = {{0, mindspore::lite::ROUND_MODE_FLOOR}, + {1, mindspore::lite::ROUND_MODE_CEIL}}; OH_NN_ReturnCode PoolingBuilder::PoolingBuild(const std::vector& paramsIndex, const std::vector& inputsIndex, @@ -45,6 +49,12 @@ OH_NN_ReturnCode PoolingBuilder::PoolingBuild(const std::vector& param return returnCode; } + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[PoolingBuilder] PoolingBuild failed, passed invalid param index of Onehot operation index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { @@ -62,10 +72,18 @@ OH_NN_ReturnCode PoolingBuilder::PoolingBuild(const std::vector& param case OH_NN_AVG_POOL_PAD: returnCode = SetPadModeOrPaddings(tensor); break; + case OH_NN_AVG_POOL_ROUND_MODE: + case OH_NN_MAX_POOL_ROUND_MODE: + returnCode = SetRoundMode(tensor); + break; case OH_NN_AVG_POOL_ACTIVATION_TYPE: case OH_NN_MAX_POOL_ACTIVATION_TYPE: returnCode = SetActivation(tensor); break; + case OH_NN_AVG_POOL_GLOBAL: + case OH_NN_MAX_POOL_GLOBAL: + returnCode = SetGlobal(tensor); + break; default: LOGE("[PoolingBuilder] Build failed, param invalid, type = %d.", tensor->GetType()); return OH_NN_INVALID_PARAMETER; @@ -186,10 +204,43 @@ OH_NN_ReturnCode 
PoolingBuilder::SetPadModeOrPaddings(std::shared_ptr return OH_NN_SUCCESS; } +OH_NN_ReturnCode PoolingBuilder::SetRoundMode(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + if (tensor->GetElementCount() != ACTIVATION_LENGTH) { + LOGE("[PoolingBuilder] SetRoundMode failed, the roundMode shoule be a scalar"); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT32) { + LOGE("[PoolingBuilder] SetRoundMode failed, the roundMode should be type OH_NN_INT32."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[PoolingBuilder] SetRoundMode GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + int roundModeKey = *(static_cast(buffer)); + auto it = roundList.find(roundModeKey); + if (it != roundList.end()) { + m_roundMode = it->second; + } else { + LOGE("[PoolingBuilder] The roundMode value should between [0, 1], but get %d.", roundModeKey); + LOGE("[PoolingBuilder] roundMode: 0-OH_NN_ROUND_FLOOR, 1-OH_NN_ROUND_CEIL"); + return OH_NN_INVALID_PARAMETER; + } + + return OH_NN_SUCCESS; +} + OH_NN_ReturnCode PoolingBuilder::SetActivation(std::shared_ptr tensor) { tensor->IdentifyOpParameter(); - // Set ActivationType + if (tensor->GetElementCount() != ACTIVATION_LENGTH) { LOGE("[PoolingBuilder] SetActivation failed, the Activation shoule be a scalar"); return OH_NN_INVALID_PARAMETER; @@ -216,6 +267,28 @@ OH_NN_ReturnCode PoolingBuilder::SetActivation(std::shared_ptr tensor) return OH_NN_SUCCESS; } + +OH_NN_ReturnCode PoolingBuilder::SetGlobal(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[PoolingBuilder] The global should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[PoolingBuilder] The global should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[PoolingBuilder] Tensor 
buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_global = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} } // namespace Ops } // namespace NeuralNetworkRuntime } // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/pooling_builder.h b/frameworks/native/neural_network_runtime/ops/pooling_builder.h index 4fbbcd80e39c92cebd010cd376cd83554b2fe044..76701db0f7d1faf3fdc5cd5301681dd942b96bd6 100644 --- a/frameworks/native/neural_network_runtime/ops/pooling_builder.h +++ b/frameworks/native/neural_network_runtime/ops/pooling_builder.h @@ -28,9 +28,9 @@ public: PoolingBuilder() = default; virtual ~PoolingBuilder() = default; - OH_NN_ReturnCode PoolingBuild(const std::vector& paramsIndex, - const std::vector& inputsIndex, + OH_NN_ReturnCode PoolingBuild(const std::vector& inputsIndex, const std::vector& outputsIndex, + const std::vector& paramsIndex, const std::vector>& allTensors); OH_NN_ReturnCode SetInputAndOutput(const std::vector& inputsIndex, @@ -40,17 +40,19 @@ public: OH_NN_ReturnCode SetKernel(std::shared_ptr tensor); OH_NN_ReturnCode SetStrides(std::shared_ptr tensor); OH_NN_ReturnCode SetPadModeOrPaddings(std::shared_ptr tensor); + OH_NN_ReturnCode SetRoundMode(std::shared_ptr tensor); OH_NN_ReturnCode SetActivation(std::shared_ptr tensor); + OH_NN_ReturnCode SetGlobal(std::shared_ptr tensor); protected: std::vector m_kernelSize; std::vector m_pad; std::vector m_strides; - mindspore::lite::PadMode m_padMode{mindspore::lite::PAD_MODE_PAD}; - mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; - mindspore::lite::RoundMode m_roundMode = mindspore::lite::ROUND_MODE_FLOOR; - mindspore::lite::Format m_format = mindspore::lite::FORMAT_NCHW; - bool m_global = false; + mindspore::lite::PadMode m_padMode {mindspore::lite::PAD_MODE_PAD}; + mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + mindspore::lite::RoundMode m_roundMode 
{mindspore::lite::ROUND_MODE_FLOOR}; + mindspore::lite::Format m_format {mindspore::lite::FORMAT_NCHW}; + bool m_global {false}; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/pow_builder.cpp b/frameworks/native/neural_network_runtime/ops/pow_builder.cpp index 774119c9034b4e3ebdc8119e2da24ce59dc6ac57..1d1170060127d9884d16c9a3135d52c4daf3ba83 100644 --- a/frameworks/native/neural_network_runtime/ops/pow_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/pow_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 2; static const int SCALAR_LENGTH = 1; static const std::string OP_NAME = "Pow"; @@ -95,7 +96,12 @@ OH_NN_ReturnCode PowBuilder::Build(const std::vector& paramsIndex, m_outputsIndex = outputsIndex; SetQuantType(outputsIndex, allTensors); - + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Pow] Build failed, passed invalid param index of Pow operation index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); diff --git a/frameworks/native/neural_network_runtime/ops/prelu_builder.cpp b/frameworks/native/neural_network_runtime/ops/prelu_builder.cpp index 3e859858c75b09900ccd63e9b647ca9401057ed5..cbea89949f6e8283ace67a9888be9e6aa1c7cf05 100644 --- a/frameworks/native/neural_network_runtime/ops/prelu_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/prelu_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUMS = 2; static const int OUTPUT_NUMS = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "PRelu"; PReluBuilder::PReluBuilder() {} @@ -46,14 +47,15 @@ OH_NN_ReturnCode PReluBuilder::Build(const std::vector& paramsIndex, return 
returnCode; } - if (!paramsIndex.empty()) { - LOGW("[PRelu] Build failed, the PRelu expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } - m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[PRelu] Build failed, passed invalid param index of PRelu operation index."); + return returnCode; + } + m_name = OP_NAME; m_isBuild = true; return OH_NN_SUCCESS; diff --git a/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.cpp b/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.cpp index 84d45ea1ff9f7bfdf7fb4bd4e1a608e02b8a1205..487d011df6eeac6be1155393b64a3300d902ac76 100644 --- a/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.cpp @@ -1,128 +1,156 @@ -/* - * Copyright (c) 2022 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "quant_dtype_cast_builder.h" - -#include "mindir.h" - -#include "ops_registry.h" - -namespace OHOS { -namespace NeuralNetworkRuntime { -namespace Ops { -static const int INPUT_NUM = 1; -static const int OUTPUT_NUM = 1; -static const std::string OP_NAME = "QuantDTypeCast"; - -QuantDTypeCastBuilder::QuantDTypeCastBuilder() {} - -QuantDTypeCastBuilder::~QuantDTypeCastBuilder() {} - -OH_NN_ReturnCode QuantDTypeCastBuilder::SetSrcT(std::shared_ptr tensor) -{ - tensor->IdentifyOpParameter(); - if (tensor->GetDataType() != OH_NN_INT64) { - LOGE("[QuantDTypeCast] SetSrcT failed, the src_t should be type OH_NN_INT64."); - return OH_NN_INVALID_PARAMETER; - } - - void* buffer = tensor->GetBuffer(); - if (buffer == nullptr) { - LOGE("[QuantDTypeCast] SetSrcT failed, the src_t passed buffer is empty."); - return OH_NN_INVALID_PARAMETER; - } - - m_src_t = static_cast(buffer); - return OH_NN_SUCCESS; -} - -OH_NN_ReturnCode QuantDTypeCastBuilder::SetDstT(std::shared_ptr tensor) -{ - tensor->IdentifyOpParameter(); - if (tensor->GetDataType() != OH_NN_INT64) { - LOGE("[QuantDTypeCast] SetDstT failed, the dst_t should be type OH_NN_INT64."); - return OH_NN_INVALID_PARAMETER; - } - - void* buffer = tensor->GetBuffer(); - if (buffer == nullptr) { - LOGE("[QuantDTypeCast] SetDstT failed, the dst_t passed buffer is empty."); - return OH_NN_INVALID_PARAMETER; - } - - m_dst_t = static_cast(buffer); - return OH_NN_SUCCESS; -} - -OH_NN_ReturnCode QuantDTypeCastBuilder::Build(const std::vector& paramsIndex, - const std::vector& inputsIndex, - const std::vector& outputsIndex, - const std::vector>& allTensors) -{ - if (m_isBuild) { - LOGE("[QuantDTypeCast] Build failed, the QuantDTypeCast operation has been build, cannot build again."); - return OH_NN_OPERATION_FORBIDDEN; - } - - OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); - if (returnCode != OH_NN_SUCCESS) { - LOGE("[QuantDTypeCast] Build failed, passed invalid 
input or output index."); - return returnCode; - } - - m_inputsIndex = inputsIndex; - m_outputsIndex = outputsIndex; - - for (uint32_t i : paramsIndex) { - std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_QUANT_DTYPE_CAST_SRC_T: - returnCode = SetSrcT(tensor); - break; - case OH_NN_QUANT_DTYPE_CAST_DST_T: - returnCode = SetDstT(tensor); - break; - default: - LOGE("[QuantDTypeCast] Build failed, parameter type is invalid. type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; - } - - if (returnCode != OH_NN_SUCCESS) { - LOGE("[QuantDTypeCast] Build failed, passed invalid param."); - return returnCode; - } - } - - m_isBuild = true; - m_name = OP_NAME; - return OH_NN_SUCCESS; -} - -LiteGraphPrimitvePtr QuantDTypeCastBuilder::GetPrimitive() -{ - if (!m_isBuild) { - LOGE("[QuantDTypeCast] GetPrimitive failed, cannot get primitive before call build."); - return {nullptr, DestroyLiteGraphPrimitive}; - } - - void* primitive = mindspore::lite::MindIR_QuantDTypeCast_CreatePrimitive(*m_src_t, *m_dst_t); - LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); - return graphPrimitivePtr; -} - -REGISTER_OPS(QuantDTypeCastBuilder, OH_NN_OPS_QUANT_DTYPE_CAST); -} // namespace Ops -} // namespace NeuralNetworkRuntime +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "quant_dtype_cast_builder.h" + +#include "mindir.h" + +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 3; +static const std::string OP_NAME = "QuantDTypeCast"; + +QuantDTypeCastBuilder::QuantDTypeCastBuilder() {} + +QuantDTypeCastBuilder::~QuantDTypeCastBuilder() {} + +OH_NN_ReturnCode QuantDTypeCastBuilder::SetSrcT(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[QuantDTypeCast] SetSrcT failed, the src_t should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[QuantDTypeCast] SetSrcT failed, the src_t passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_src_t = static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode QuantDTypeCastBuilder::SetDstT(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[QuantDTypeCast] SetDstT failed, the dst_t should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[QuantDTypeCast] SetDstT failed, the dst_t passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_dst_t = static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode QuantDTypeCastBuilder::SetAxis(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[QuantDTypeCast] SetAxis failed, the dst_t should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[QuantDTypeCast] SetAxis failed, the dst_t passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_axis = *(static_cast(buffer)); + return OH_NN_SUCCESS; 
+} + +OH_NN_ReturnCode QuantDTypeCastBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[QuantDTypeCast] Build failed, the QuantDTypeCast operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[QuantDTypeCast] Build failed, passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[QuantDTypeCast] Build failed, passed invalid param index."); + return returnCode; + } + + for (uint32_t i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_QUANT_DTYPE_CAST_SRC_T: + returnCode = SetSrcT(tensor); + break; + case OH_NN_QUANT_DTYPE_CAST_DST_T: + returnCode = SetDstT(tensor); + break; + case OH_NN_QUANT_DTYPE_CAST_AXIS: + returnCode = SetAxis(tensor); + break; + default: + LOGE("[QuantDTypeCast] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[QuantDTypeCast] Build failed, passed invalid param."); + return returnCode; + } + } + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr QuantDTypeCastBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[QuantDTypeCast] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_QuantDTypeCast_CreatePrimitive(*m_src_t, *m_dst_t, m_axis); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(QuantDTypeCastBuilder, OH_NN_OPS_QUANT_DTYPE_CAST); +} // namespace Ops +} // namespace NeuralNetworkRuntime } // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.h b/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.h index 6ddef1b03acf9d7f567997bcf92f8503d3011e69..8270ee9bb0d82dd7274c0e72e7c442e18d0ebd8d 100644 --- a/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.h +++ b/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.h @@ -1,47 +1,49 @@ -/* - * Copyright (c) 2022 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef NEURAL_NETWORK_RUNTIME_QUANTDTYPECAST_BUILDER_H -#define NEURAL_NETWORK_RUNTIME_QUANTDTYPECAST_BUILDER_H - -#include "ops_builder.h" - -namespace OHOS { -namespace NeuralNetworkRuntime { -namespace Ops { -class QuantDTypeCastBuilder : public OpsBuilder { -public: - QuantDTypeCastBuilder(); - ~QuantDTypeCastBuilder() override; - OH_NN_ReturnCode Build(const std::vector& paramsIndex, - const std::vector& inputsIndex, - const std::vector& outputsIndex, - const std::vector>& allTensors) override; - - LiteGraphPrimitvePtr GetPrimitive() override; - -private: - OH_NN_ReturnCode SetSrcT(std::shared_ptr tensor); - OH_NN_ReturnCode SetDstT(std::shared_ptr tensor); - -private: - const uint64_t* m_src_t{nullptr}; - const uint64_t* m_dst_t{nullptr}; -}; -} // namespace Ops -} // namespace NeuralNetworkRuntime -} // namespace OHOS - +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_QUANTDTYPECAST_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_QUANTDTYPECAST_BUILDER_H + +#include "ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class QuantDTypeCastBuilder : public OpsBuilder { +public: + QuantDTypeCastBuilder(); + ~QuantDTypeCastBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetSrcT(std::shared_ptr tensor); + OH_NN_ReturnCode SetDstT(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + +private: + const uint64_t* m_src_t {nullptr}; + const uint64_t* m_dst_t {nullptr}; + int64_t m_axis {0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + #endif // NEURAL_NETWORK_RUNTIME_QUANTDTYPECAST_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/range_builder.cpp b/frameworks/native/neural_network_runtime/ops/range_builder.cpp index e01d4a585c551677fe721b6523e5e03822af8a5c..f7fd0879fd1499dc3bc4104d3923c3f084d5ad49 100755 --- a/frameworks/native/neural_network_runtime/ops/range_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/range_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 3; static const int SCALAR_LENGTH = 1; static const std::string OP_NAME = "Range"; @@ -27,28 +28,6 @@ RangeBuilder::RangeBuilder() {} RangeBuilder::~RangeBuilder() {} -OH_NN_ReturnCode RangeBuilder::SetDType(std::shared_ptr tensor) -{ - if (tensor->GetDataType() != OH_NN_INT64) { - LOGE("[Range] The dType should be type OH_NN_INT64."); - return OH_NN_INVALID_PARAMETER; - } - - if (tensor->GetElementCount() != SCALAR_LENGTH) { - LOGE("[Range] 
The dType should be scalar."); - return OH_NN_INVALID_PARAMETER; - } - - void* buffer = tensor->GetBuffer(); - if (buffer == nullptr) { - LOGE("[Range] Tensor buffer is nullptr."); - return OH_NN_INVALID_PARAMETER; - } - m_dType = *(static_cast(buffer)); - - return OH_NN_SUCCESS; -} - OH_NN_ReturnCode RangeBuilder::SetStart(std::shared_ptr tensor) { if (tensor->GetDataType() != OH_NN_INT64) { @@ -133,32 +112,34 @@ OH_NN_ReturnCode RangeBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; - - OH_NN_ReturnCode returnCode; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Range] Build failed, passed invalid param index."); + return ret; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); switch (tensor->GetType()) { - case OH_NN_RANGE_DTYPE: - returnCode = SetDType(tensor); - break; case OH_NN_RANGE_START: - returnCode = SetStart(tensor); + ret = SetStart(tensor); break; case OH_NN_RANGE_LIMIT: - returnCode = SetLimit(tensor); + ret = SetLimit(tensor); break; case OH_NN_RANGE_DELTA: - returnCode = SetDelta(tensor); + ret = SetDelta(tensor); break; default: LOGE("[Range] Build failed, param invalid, type=%d", tensor->GetType()); return OH_NN_INVALID_PARAMETER; } - if (returnCode != OH_NN_SUCCESS) { + if (ret != OH_NN_SUCCESS) { LOGE("[Range] Build failed, passed invalid param."); - return returnCode; + return ret; } } @@ -174,7 +155,9 @@ LiteGraphPrimitvePtr RangeBuilder::GetPrimitive() return {nullptr, DestroyLiteGraphPrimitive}; } - void* primitive = mindspore::lite::MindIR_Range_CreatePrimitive(m_dType, m_start, m_limit, m_delta); + int64_t dType {0.0f}; + + void* primitive = mindspore::lite::MindIR_Range_CreatePrimitive(dType, m_start, m_limit, m_delta); LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; return graphPrimitivePtr; } diff --git 
a/frameworks/native/neural_network_runtime/ops/range_builder.h b/frameworks/native/neural_network_runtime/ops/range_builder.h index 1189f4420b535c79d82d12e0a526fbf10026b4d9..8bc33f142c82eb579b01e612966ea7ddcdc9a059 100755 --- a/frameworks/native/neural_network_runtime/ops/range_builder.h +++ b/frameworks/native/neural_network_runtime/ops/range_builder.h @@ -36,16 +36,14 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetDType(std::shared_ptr tensor); OH_NN_ReturnCode SetStart(std::shared_ptr tensor); OH_NN_ReturnCode SetLimit(std::shared_ptr tensor); OH_NN_ReturnCode SetDelta(std::shared_ptr tensor); private: - int64_t m_dType {0.0f}; - int64_t m_start {0.0f}; - int64_t m_limit {0.0f}; - int64_t m_delta {1.0f}; + int64_t m_start {0}; + int64_t m_limit {0}; + int64_t m_delta {1}; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/rank_builder.cpp b/frameworks/native/neural_network_runtime/ops/rank_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b2042702cde95b42a15bd68cca25d24c94e57ee5 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/rank_builder.cpp @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "rank_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; +static const std::string OP_NAME = "Rank"; + +RankBuilder::RankBuilder() {} + +RankBuilder::~RankBuilder() {} + +OH_NN_ReturnCode RankBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Rank] Build failed, the rank operation has been build. cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Rank] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Rank] Build failed, passed invalid param index."); + return ret; + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr RankBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Rank] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Rank_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(RankBuilder, OH_NN_OPS_RANK); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/rank_builder.h b/frameworks/native/neural_network_runtime/ops/rank_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..cd782543be3696d099dc4b69e099359c45bda70e --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/rank_builder.h @@ -0,0 
+1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_RANK_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_RANK_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class RankBuilder : public OpsBuilder { +public: + RankBuilder(); + ~RankBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_RANK_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/reciprocal_builder.cpp b/frameworks/native/neural_network_runtime/ops/reciprocal_builder.cpp index 44f7c74a86aa44d574ec9344aeecf44dd8095568..bed077a3b53cfc184a3fff5c3b2ba9e39c93cc5d 100644 --- a/frameworks/native/neural_network_runtime/ops/reciprocal_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reciprocal_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Reciprocal"; ReciprocalBuilder::ReciprocalBuilder() {} 
@@ -41,15 +42,16 @@ OH_NN_ReturnCode ReciprocalBuilder::Build(const std::vector& paramsInd LOGE("[Reciprocal] Build failed, passed invalid input or output index."); return ret; } - - if (!paramsIndex.empty()) { - LOGW("[Reciprocal] Build failed, the reciprocal expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Reciprocal] Build failed, passed invalid param index."); + return ret; + } + m_name = OP_NAME; m_isBuild = true; return OH_NN_SUCCESS; diff --git a/frameworks/native/neural_network_runtime/ops/reduceall_builder.cpp b/frameworks/native/neural_network_runtime/ops/reduceall_builder.cpp index fd5c22eac5d8591c782de2a6ffbf2c643b374afc..63201b8efdbd4deabe49b97613660c168f35d320 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceall_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reduceall_builder.cpp @@ -22,26 +22,72 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 3; static const int SCALE_LENGTH = 1; static const std::string OP_NAME = "ReduceAll"; ReduceAllBuilder::ReduceAllBuilder() {} -ReduceAllBuilder:: ~ReduceAllBuilder() {} +ReduceAllBuilder::~ReduceAllBuilder() {} -OH_NN_ReturnCode ReduceAllBuilder::SetKeepDims(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceAllBuilder::SetCoeff(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[ReduceAll] The coeff should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceAll] The coeff should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceAll] Tensor buffer is nullptr."); + return 
OH_NN_INVALID_PARAMETER; + } + m_coeff = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceAllBuilder::SetReduceToEnd(std::shared_ptr tensor) { + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ReduceAll] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { - LOGE("[ReduceAll] SetKeepDims failed, the keep_dims dimensions should be scalar."); + LOGE("[ReduceAll] SetReduceToEnd failed, the reduceToEnd dimensions should be scalar."); return OH_NN_INVALID_PARAMETER; } + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceAll] SetReduceToEnd failed, the reduceToEnd passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_reduceToEnd = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceAllBuilder::SetKeepDims(std::shared_ptr tensor) +{ if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceAll] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); return OH_NN_INVALID_PARAMETER; } + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceAll] SetKeepDims failed, the keep_dims dimensions should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + void* buffer = tensor->GetBuffer(); if (buffer == nullptr) { LOGE("[ReduceAll] SetKeepDims failed, the keep_dims passed buffer is empty."); @@ -71,12 +117,24 @@ OH_NN_ReturnCode ReduceAllBuilder::Build(const std::vector& paramsInde m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceAll] Build failed, passed invalid param index of ReduceAll operation index."); + return returnCode; + } + for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { case 
OH_NN_REDUCE_ALL_KEEP_DIMS: returnCode = SetKeepDims(tensor); break; + case OH_NN_REDUCE_ALL_REDUCE_TO_END: + returnCode = SetReduceToEnd(tensor); + break; + case OH_NN_REDUCE_ALL_COEFF: + returnCode = SetCoeff(tensor); + break; default: LOGE("[ReduceAll] Build failed, parameter type is invalid. type=%d", tensor->GetType()); return OH_NN_INVALID_PARAMETER; @@ -100,10 +158,9 @@ LiteGraphPrimitvePtr ReduceAllBuilder::GetPrimitive() return {nullptr, DestroyLiteGraphPrimitive}; } - bool reduceToEnd{false}; - float coeff{0.0f}; + mindspore::lite::ReduceMode m_mode {mindspore::lite::REDUCE_MODE_ALL}; - void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(m_keepDims, m_mode, reduceToEnd, coeff); + void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(m_keepDims, m_mode, m_reduceToEnd, m_coeff); LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); return graphPrimitivePtr; } diff --git a/frameworks/native/neural_network_runtime/ops/reduceall_builder.h b/frameworks/native/neural_network_runtime/ops/reduceall_builder.h index 176ecb3defe00c19254a1fd3bd08a5c1f6b0d55b..afd00a87007ab32247107f94e1baf2689498500d 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceall_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reduceall_builder.h @@ -35,11 +35,14 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: + OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); + OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); private: - bool m_keepDims{false}; - mindspore::lite::ReduceMode m_mode{mindspore::lite::REDUCE_MODE_ALL}; + float m_coeff {0.0f}; + bool m_reduceToEnd {false}; + bool m_keepDims {false}; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reducemax_builder.cpp b/frameworks/native/neural_network_runtime/ops/reducemax_builder.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..43d1101c6b83b671c32dbbc6309c05a514593c62 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/reducemax_builder.cpp @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "reducemax_builder.h" + +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 3; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "ReduceMax"; + +ReduceMaxBuilder::ReduceMaxBuilder() {} + +ReduceMaxBuilder::~ReduceMaxBuilder() {} + +OH_NN_ReturnCode ReduceMaxBuilder::SetCoeff(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[ReduceMax] The coeff should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceMax] The coeff should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceMax] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_coeff = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceMaxBuilder::SetReduceToEnd(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ReduceMax] SetReduceToEnd failed, the reduceToEnd should be type 
OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceMax] SetReduceToEnd failed, the reduceToEnd dimensions should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceMax] SetReduceToEnd failed, the reduceToEnd passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_reduceToEnd = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceMaxBuilder::SetKeepDims(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ReduceMax] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceMax] SetKeepDims failed, the keep_dims dimensions should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceMax] SetKeepDims failed, the keep_dims passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_keepDims = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceMaxBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ReduceMax] Build failed, the ReduceMax operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceMax] Build failed, passed invalid input or output index of ReduceMax operation index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if 
(returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceMax] Build failed, passed invalid param index of ReduceMax operation index."); + return returnCode; + } + + for (uint32_t i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_REDUCE_MAX_KEEP_DIMS: + returnCode = SetKeepDims(tensor); + break; + case OH_NN_REDUCE_MAX_REDUCE_TO_END: + returnCode = SetReduceToEnd(tensor); + break; + case OH_NN_REDUCE_MAX_COEFF: + returnCode = SetCoeff(tensor); + break; + default: + LOGE("[ReduceMax] Build failed, parameter type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceMax] Build failed, passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ReduceMaxBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ReduceMax] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + mindspore::lite::ReduceMode m_mode {mindspore::lite::REDUCE_MODE_MAX}; + + void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(m_keepDims, m_mode, m_reduceToEnd, m_coeff); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ReduceMaxBuilder, OH_NN_OPS_REDUCE_MAX); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/reducemax_builder.h b/frameworks/native/neural_network_runtime/ops/reducemax_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..5f14bc3ef403223f12224a13d53fd8b7fded7f7d --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/reducemax_builder.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_REDUCEMAX_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_REDUCEMAX_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ReduceMaxBuilder : public OpsBuilder { +public: + ReduceMaxBuilder(); + ~ReduceMaxBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); + OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); + OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + +private: + float m_coeff {0.0f}; + bool m_reduceToEnd {false}; + bool m_keepDims {false}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_REDUCEMAX_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/reducemean_builder.cpp b/frameworks/native/neural_network_runtime/ops/reducemean_builder.cpp index 5783beaa826b66ab8410d22e3942e57270d5d4c3..3648a6d6c4455d7ae073917278fce1f2e6c304ef 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemean_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reducemean_builder.cpp @@ -22,6 +22,7 @@ namespace 
NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 3; static const int SCALE_LENGTH = 1; static const std::string OP_NAME = "ReduceMean"; @@ -29,6 +30,51 @@ ReduceMeanBuilder::ReduceMeanBuilder() {} ReduceMeanBuilder:: ~ReduceMeanBuilder() {} +OH_NN_ReturnCode ReduceMeanBuilder::SetCoeff(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[ReduceAll] The coeff should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceAll] The coeff should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceAll] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_coeff = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceMeanBuilder::SetReduceToEnd(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ReduceAll] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceAll] SetReduceToEnd failed, the reduceToEnd dimensions should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceAll] SetReduceToEnd failed, the reduceToEnd passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_reduceToEnd = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + OH_NN_ReturnCode ReduceMeanBuilder::SetKeepDims(std::shared_ptr tensor) { tensor->IdentifyOpParameter(); @@ -71,12 +117,24 @@ OH_NN_ReturnCode ReduceMeanBuilder::Build(const std::vector& paramsInd m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != 
OH_NN_SUCCESS) { + LOGE("[ReduceMean] Build failed, passed invalid param index of ReduceMean operation index."); + return returnCode; + } + for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { case OH_NN_REDUCE_MEAN_KEEP_DIMS: returnCode = SetKeepDims(tensor); break; + case OH_NN_REDUCE_MEAN_REDUCE_TO_END: + returnCode = SetReduceToEnd(tensor); + break; + case OH_NN_REDUCE_MEAN_COEFF: + returnCode = SetCoeff(tensor); + break; default: LOGE("[ReduceMean] Build failed, parameter type is invalid. type=%d", tensor->GetType()); return OH_NN_INVALID_PARAMETER; @@ -102,10 +160,9 @@ LiteGraphPrimitvePtr ReduceMeanBuilder::GetPrimitive() return {nullptr, DestroyLiteGraphPrimitive}; } - bool reduceToEnd{false}; - float coeff{0.0f}; + mindspore::lite::ReduceMode m_mode{mindspore::lite::REDUCE_MODE_MEAN}; - void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(m_keepDims, m_mode, reduceToEnd, coeff); + void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(m_keepDims, m_mode, m_reduceToEnd, m_coeff); LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); return graphPrimitivePtr; } diff --git a/frameworks/native/neural_network_runtime/ops/reducemean_builder.h b/frameworks/native/neural_network_runtime/ops/reducemean_builder.h index b589fad1c28391bcf4ec597a9e6ecdeb5584bf25..ae2d21bd7cba8905905aa07a6c53ea1903251251 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemean_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reducemean_builder.h @@ -35,11 +35,14 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: + OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); + OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); private: bool m_keepDims{false}; - mindspore::lite::ReduceMode m_mode{mindspore::lite::REDUCE_MODE_MEAN}; + float m_coeff {0.0f}; + bool m_reduceToEnd {false}; }; } // 
namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reducemin_builder.cpp b/frameworks/native/neural_network_runtime/ops/reducemin_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..16248d43f4e2bf23ceb97b8031d56b98ac3d9f12 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/reducemin_builder.cpp @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "reducemin_builder.h" + +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 3; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "ReduceMin"; + +ReduceMinBuilder::ReduceMinBuilder() {} + +ReduceMinBuilder::~ReduceMinBuilder() {} + +OH_NN_ReturnCode ReduceMinBuilder::SetCoeff(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[ReduceMin] The coeff should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceMin] The coeff should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceMin] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_coeff = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceMinBuilder::SetReduceToEnd(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ReduceMin] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceMin] SetReduceToEnd failed, the reduceToEnd dimensions should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceMin] SetReduceToEnd failed, the reduceToEnd passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_reduceToEnd = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceMinBuilder::SetKeepDims(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ReduceMin] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + 
tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceMin] SetKeepDims failed, the keep_dims dimensions should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceMin] SetKeepDims failed, the keep_dims passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_keepDims = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceMinBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ReduceMin] Build failed, the ReduceMin operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceMin] Build failed, passed invalid input or output index of ReduceMin operation index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceMin] Build failed, passed invalid param index of ReduceMin operation index."); + return returnCode; + } + + for (uint32_t i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_REDUCE_MIN_KEEP_DIMS: + returnCode = SetKeepDims(tensor); + break; + case OH_NN_REDUCE_MIN_REDUCE_TO_END: + returnCode = SetReduceToEnd(tensor); + break; + case OH_NN_REDUCE_MIN_COEFF: + returnCode = SetCoeff(tensor); + break; + default: + LOGE("[ReduceMin] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceMin] Build failed, passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ReduceMinBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ReduceMin] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + mindspore::lite::ReduceMode m_mode {mindspore::lite::REDUCE_MODE_MIN}; + + void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(m_keepDims, m_mode, m_reduceToEnd, m_coeff); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ReduceMinBuilder, OH_NN_OPS_REDUCE_MIN); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/reducemin_builder.h b/frameworks/native/neural_network_runtime/ops/reducemin_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..618e179a2534508fd9d2c333a064d3f129722443 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/reducemin_builder.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_REDUCEMIN_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_REDUCEMIN_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ReduceMinBuilder : public OpsBuilder { +public: + ReduceMinBuilder(); + ~ReduceMinBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); + OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); + OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + +private: + float m_coeff {0.0f}; + bool m_reduceToEnd {false}; + bool m_keepDims {false}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_REDUCEMIN_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/reduceprod_builder.cpp b/frameworks/native/neural_network_runtime/ops/reduceprod_builder.cpp index bd8b62f7ede14df362c0c3b626823b8a6b5bd712..3006ecd8a8bc155d2314c9aa2706a24ef29b6e93 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceprod_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reduceprod_builder.cpp @@ -22,6 +22,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 3; static const int SCALE_LENGTH = 1; static const std::string OP_NAME = "ReduceProd"; @@ -29,6 +30,51 @@ ReduceProdBuilder::ReduceProdBuilder() {} ReduceProdBuilder:: ~ReduceProdBuilder() {} +OH_NN_ReturnCode ReduceProdBuilder::SetCoeff(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[ReduceAll] The coeff should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if 
(tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceAll] The coeff should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceAll] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_coeff = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceProdBuilder::SetReduceToEnd(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ReduceAll] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceAll] SetReduceToEnd failed, the reduceToEnd dimensions should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceAll] SetReduceToEnd failed, the reduceToEnd passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_reduceToEnd = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + OH_NN_ReturnCode ReduceProdBuilder::SetKeepDims(std::shared_ptr tensor) { tensor->IdentifyOpParameter(); @@ -71,12 +117,24 @@ OH_NN_ReturnCode ReduceProdBuilder::Build(const std::vector& paramsInd m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceProd] Build failed, passed invalid param index of ReduceProd operation index."); + return returnCode; + } + for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { case OH_NN_REDUCE_PROD_KEEP_DIMS: returnCode = SetKeepDims(tensor); break; + case OH_NN_REDUCE_PROD_REDUCE_TO_END: + returnCode = SetReduceToEnd(tensor); + break; + case OH_NN_REDUCE_PROD_COEFF: + returnCode = SetCoeff(tensor); + break; default: LOGE("[ReduceProd] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); return OH_NN_INVALID_PARAMETER; @@ -102,10 +160,9 @@ LiteGraphPrimitvePtr ReduceProdBuilder::GetPrimitive() return {nullptr, DestroyLiteGraphPrimitive}; } - bool reduceToEnd{false}; - float coeff{0.0f}; + mindspore::lite::ReduceMode m_mode{mindspore::lite::REDUCE_MODE_PROD}; - void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(m_keepDims, m_mode, reduceToEnd, coeff); + void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(m_keepDims, m_mode, m_reduceToEnd, m_coeff); LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); return graphPrimitivePtr; } diff --git a/frameworks/native/neural_network_runtime/ops/reduceprod_builder.h b/frameworks/native/neural_network_runtime/ops/reduceprod_builder.h index 2dd2a948ad15922cc64783fc2a2b82a0de73460d..f5952606a33e210c2c1cc3712a4858574d6e0bb9 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceprod_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reduceprod_builder.h @@ -35,11 +35,14 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: + OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); + OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); private: bool m_keepDims{false}; - mindspore::lite::ReduceMode m_mode{mindspore::lite::REDUCE_MODE_PROD}; + float m_coeff {0.0f}; + bool m_reduceToEnd {false}; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reducesum_builder.cpp b/frameworks/native/neural_network_runtime/ops/reducesum_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..88d442f2d4b3be5739950bb4dcbd29fd69561e82 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/reducesum_builder.cpp @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "reducesum_builder.h" + +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 3; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "ReduceSum"; + +ReduceSumBuilder::ReduceSumBuilder() {} + +ReduceSumBuilder::~ReduceSumBuilder() {} + +OH_NN_ReturnCode ReduceSumBuilder::SetCoeff(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[ReduceSum] The coeff should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceSum] The coeff should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceSum] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_coeff = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceSumBuilder::SetReduceToEnd(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ReduceSum] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceSum] SetReduceToEnd failed, the reduceToEnd dimensions should be scalar."); + return 
OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceSum] SetReduceToEnd failed, the reduceToEnd passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_reduceToEnd = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceSumBuilder::SetKeepDims(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ReduceSum] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceSum] SetKeepDims failed, the keep_dims dimensions should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceSum] SetKeepDims failed, the keep_dims passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_keepDims = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceSumBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ReduceSum] Build failed, the ReduceSum operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceSum] Build failed, passed invalid input or output index of ReduceSum operation index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceSum] Build failed, passed invalid param index of ReduceSum operation index."); + return returnCode; + } + + for (uint32_t i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch 
(tensor->GetType()) { + case OH_NN_REDUCE_SUM_KEEP_DIMS: + returnCode = SetKeepDims(tensor); + break; + case OH_NN_REDUCE_SUM_REDUCE_TO_END: + returnCode = SetReduceToEnd(tensor); + break; + case OH_NN_REDUCE_SUM_COEFF: + returnCode = SetCoeff(tensor); + break; + default: + LOGE("[ReduceSum] Build failed, parameter type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceSum] Build failed, passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ReduceSumBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ReduceSum] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + mindspore::lite::ReduceMode m_mode {mindspore::lite::REDUCE_MODE_SUM}; + + void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(m_keepDims, m_mode, m_reduceToEnd, m_coeff); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ReduceSumBuilder, OH_NN_OPS_REDUCE_SUM); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/reducesum_builder.h b/frameworks/native/neural_network_runtime/ops/reducesum_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..1cf9ede58a2a43bfc131a502d5f91ea8519f7b34 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/reducesum_builder.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_REDUCESUM_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_REDUCESUM_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ReduceSumBuilder : public OpsBuilder { +public: + ReduceSumBuilder(); + ~ReduceSumBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); + OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); + OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + +private: + float m_coeff {0.0f}; + bool m_reduceToEnd {false}; + bool m_keepDims {false}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_REDUCESUM_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/relu6_builder.cpp b/frameworks/native/neural_network_runtime/ops/relu6_builder.cpp index 04d48e2908c8f6076bdb8fea50248a7cafeaaa35..b3f1c2159df86e53adff5b77e3bb6683f3eda03d 100644 --- a/frameworks/native/neural_network_runtime/ops/relu6_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/relu6_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME 
= "Relu6"; Relu6Builder::Relu6Builder() {} @@ -46,14 +47,15 @@ OH_NN_ReturnCode Relu6Builder::Build(const std::vector& paramsIndex, return returnCode; } - if (!paramsIndex.empty()) { - LOGW("[Relu6] Build failed, the Relu6 expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } - m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Relu6] Build failed, passed invaid param indices."); + return returnCode; + } + SetQuantType(outputsIndex, allTensors); m_isBuild = true; diff --git a/frameworks/native/neural_network_runtime/ops/relu_builder.cpp b/frameworks/native/neural_network_runtime/ops/relu_builder.cpp index d1cf527cb71c3d19d90e8a915a7d5aaddc6c2b1b..24c55148707dd3049996d418b149c6bd52ec8ac5 100644 --- a/frameworks/native/neural_network_runtime/ops/relu_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/relu_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUMS = 1; static const int OUTPUT_NUMS = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Relu"; ReluBuilder::ReluBuilder() {} @@ -46,14 +47,15 @@ OH_NN_ReturnCode ReluBuilder::Build(const std::vector& paramsIndex, return returnCode; } - if (!paramsIndex.empty()) { - LOGW("[Relu] Build failed, the Relu expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } - m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Relu] Build failed, passed invalid param indices."); + return returnCode; + } + SetQuantType(outputsIndex, allTensors); m_isBuild = true; diff --git a/frameworks/native/neural_network_runtime/ops/reshape_builder.cpp b/frameworks/native/neural_network_runtime/ops/reshape_builder.cpp index 
3375ff1fc39b538baf137fb0199a6cda3447019e..5c60efe905cda245aeb30a87e70d20dc4043d30b 100644 --- a/frameworks/native/neural_network_runtime/ops/reshape_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reshape_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Reshape"; ReshapeBuilder::ReshapeBuilder() {} @@ -46,14 +47,15 @@ OH_NN_ReturnCode ReshapeBuilder::Build(const std::vector& paramsIndex, return returnCode; } - if (!paramsIndex.empty()) { - LOGW("[Reshape] Build failed, the Reshape expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } - m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Reshape] Build failed, passed invalid param index of Reshape operation index."); + return returnCode; + } + SetQuantType(outputsIndex, allTensors); m_name = OP_NAME; diff --git a/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.cpp b/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.cpp index be84a86bffae7889ebaff2df22dcfb4a794e8860..079a5b4752d1f88e8bba82b19f51ad575df88317 100644 --- a/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.cpp @@ -22,6 +22,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 6; static const int SCALE_LENGTH = 1; static const std::string OP_NAME = "ResizeBilinear"; @@ -166,6 +167,12 @@ OH_NN_ReturnCode ResizeBilinearBuilder::Build(const std::vector& param m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, 
PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ResizeBilinear] Build failed, passed invalid params index."); + return returnCode; + } + for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { diff --git a/frameworks/native/neural_network_runtime/ops/round_builder.cpp b/frameworks/native/neural_network_runtime/ops/round_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9076e78b876aac8e0ed5ca6b8d29a8056e627349 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/round_builder.cpp @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "round_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; +static const std::string OP_NAME = "Round"; + +RoundBuilder::RoundBuilder() {} + +RoundBuilder::~RoundBuilder() {} + +OH_NN_ReturnCode RoundBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Round] Build failed, the round operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Round] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Round] Build failed, passed invalid param index."); + return ret; + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr RoundBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Round] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Round_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(RoundBuilder, OH_NN_OPS_ROUND); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/round_builder.h b/frameworks/native/neural_network_runtime/ops/round_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..02d51f90ab42ae7fc99803aa87fd6875c8c7b231 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/round_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_ROUND_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_ROUND_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class RoundBuilder : public OpsBuilder { +public: + RoundBuilder(); + ~RoundBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_ROUND_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/rsqrt_builder.cpp b/frameworks/native/neural_network_runtime/ops/rsqrt_builder.cpp index 4ca0b00a351969cc38ee1b5518cc38ad6283736b..ad833cdb979cb15291d0a33a7c8d30559bf7005f 100644 --- a/frameworks/native/neural_network_runtime/ops/rsqrt_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/rsqrt_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Rsqrt"; RsqrtBuilder::RsqrtBuilder() {} @@ -46,14 +47,15 @@ OH_NN_ReturnCode RsqrtBuilder::Build(const std::vector& paramsIndex, return returnCode; } - if (!paramsIndex.empty()) { - LOGW("[Rsqrt] Build failed, the Rsqrt expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } - m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Rsqrt] Build failed, passed invalid params index of Rsqrt operation index."); + return 
returnCode; + } + SetQuantType(outputsIndex, allTensors); m_name = OP_NAME; diff --git a/frameworks/native/neural_network_runtime/ops/scale_builder.cpp b/frameworks/native/neural_network_runtime/ops/scale_builder.cpp index d9527ecd5c9a88b321410682fe3036ec8fbfd20d..9c2256ac0efa76031fa4620cfadaa72660f5122d 100644 --- a/frameworks/native/neural_network_runtime/ops/scale_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/scale_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 3; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 2; static const int SCALE_LENGTH = 1; static const std::string OP_NAME = "Scale"; @@ -103,6 +104,12 @@ OH_NN_ReturnCode ScaleBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ScaleBuilder] Build failed, passed invaid param index."); + return returnCode; + } + for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; switch (tensor->GetType()) { diff --git a/frameworks/native/neural_network_runtime/ops/scatter_nd_builder.cpp b/frameworks/native/neural_network_runtime/ops/scatter_nd_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..48568a918380be767518d6aee0c39b418c8dde81 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/scatter_nd_builder.cpp @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "scatter_nd_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 3; +static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; +static const std::string OP_NAME = "ScatterND"; + +ScatterNDBuilder::ScatterNDBuilder() {} + +ScatterNDBuilder::~ScatterNDBuilder() {} + +OH_NN_ReturnCode ScatterNDBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ScatterND] Build failed, the scatterND operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[ScatterND] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[ScatterND] Build failed, passed invalid params index."); + return ret; + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ScatterNDBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ScatterND] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_ScatterNd_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(ScatterNDBuilder, OH_NN_OPS_SCATTER_ND); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/scatter_nd_builder.h b/frameworks/native/neural_network_runtime/ops/scatter_nd_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..2e2e8c945b9e540bf022c84b2df6afa2fa37f2ed --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/scatter_nd_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_SCATTER_ND_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SCATTER_ND_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ScatterNDBuilder : public OpsBuilder { +public: + ScatterNDBuilder(); + ~ScatterNDBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SCATTER_ND_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/select_builder.cpp b/frameworks/native/neural_network_runtime/ops/select_builder.cpp index 955c267aff5c7ac9cff5cb3676ec091e5ac4a3a4..eab4bf30eb2c214c4db72c7f21a2e7e1cb0b53f3 100755 --- a/frameworks/native/neural_network_runtime/ops/select_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/select_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 3; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Select"; SelectBuilder::SelectBuilder() {} @@ -41,15 +42,16 @@ OH_NN_ReturnCode SelectBuilder::Build(const std::vector& paramsIndex, LOGE("[Select] Build failed, passed invalid input or output index."); return ret; } - - if (!paramsIndex.empty()) { - LOGW("[Select] Build failed, the select expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + 
LOGE("[Select] Build failed, passed invalid param index."); + return ret; + } + m_name = OP_NAME; m_isBuild = true; return OH_NN_SUCCESS; diff --git a/frameworks/native/neural_network_runtime/ops/shape_builder.cpp b/frameworks/native/neural_network_runtime/ops/shape_builder.cpp index 3d2781adfe3e69c77616fb68e2c1cfd5667f8b6d..2d7e65d3147948a8d2b81ab5429db40f24879b52 100644 --- a/frameworks/native/neural_network_runtime/ops/shape_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/shape_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Shape"; ShapeBuilder::ShapeBuilder() {} @@ -46,14 +47,15 @@ OH_NN_ReturnCode ShapeBuilder::Build(const std::vector& paramsIndex, return returnCode; } - if (!paramsIndex.empty()) { - LOGW("[ShapeBuilder] Build failed, the Shape expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } - m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ShapeBuilder] Build failed, passed invalid param index."); + return returnCode; + } + m_isBuild = true; m_name = OP_NAME; return OH_NN_SUCCESS; diff --git a/frameworks/native/neural_network_runtime/ops/sigmoid_builder.cpp b/frameworks/native/neural_network_runtime/ops/sigmoid_builder.cpp index 9a084d0cb18df03f8dc5277c1f1c24dafba270fd..204568a78d719d6e3f81f2c60986c6270572ad2b 100644 --- a/frameworks/native/neural_network_runtime/ops/sigmoid_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/sigmoid_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Sigmoid"; SigmoidBuilder::SigmoidBuilder() {} @@ 
-46,14 +47,15 @@ OH_NN_ReturnCode SigmoidBuilder::Build(const std::vector& paramsIndex, return returnCode; } - if (!paramsIndex.empty()) { - LOGW("[SigmoidBuilder] Build failed, the Sigmoid expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } - m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SigmoidBuilder] Build failed, passed invalid input or output index."); + return returnCode; + } + // The quantization type of the first output determinies that of the operator. SetQuantType(outputsIndex, allTensors); diff --git a/frameworks/native/neural_network_runtime/ops/sin_builder.cpp b/frameworks/native/neural_network_runtime/ops/sin_builder.cpp index 589a8227acae44624a2878e73d171fb40ceda4ab..442924453f5e979c8b23247176363e9bda38a712 100644 --- a/frameworks/native/neural_network_runtime/ops/sin_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/sin_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Sin"; SinBuilder::SinBuilder() {} @@ -41,15 +42,16 @@ OH_NN_ReturnCode SinBuilder::Build(const std::vector& paramsIndex, LOGE("[Sin] Build failed, passed invalid input or output index."); return ret; } - - if (!paramsIndex.empty()) { - LOGW("[Sin] Build failed, the sin expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Sin] Build failed, passed invalid input or output index."); + return ret; + } + m_name = OP_NAME; m_isBuild = true; return OH_NN_SUCCESS; diff --git a/frameworks/native/neural_network_runtime/ops/slice_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/slice_builder.cpp index 6351cc807a9bfc029f58e64c044ce27dcb2034f6..4ab1d947de1f12abadf3cc6ec9f75b2afd22e40c 100644 --- a/frameworks/native/neural_network_runtime/ops/slice_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/slice_builder.cpp @@ -22,12 +22,38 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 3; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const std::string OP_NAME = "Slice"; SliceBuilder::SliceBuilder() {} SliceBuilder::~SliceBuilder() {} +OH_NN_ReturnCode SliceBuilder::SetAxes(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[SliceBuilder] The axes should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + m_axes.clear(); + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[SliceBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + int64_t* pAxes = static_cast(buffer); + + uint32_t elementCount = tensor->GetElementCount(); + for (uint32_t i = 0; i < elementCount; ++i) { + m_axes.emplace_back(*pAxes); + ++pAxes; + } + return OH_NN_SUCCESS; +} + /** * Build method. * 1.set attr of ops. 
@@ -50,9 +76,27 @@ OH_NN_ReturnCode SliceBuilder::Build(const std::vector& paramsIndex, return returnCode; } - if (!paramsIndex.empty()) { - LOGE("[SliceBuilder] slice expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SliceBuilder] Passed invalid param index."); + return returnCode; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_SLICE_AXES: + returnCode = SetAxes(tensor); + break; + default: + LOGE("[SliceBuilder] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SliceBuilder] Build failed, passed invalid param."); + return returnCode; + } } m_inputsIndex = inputsIndex; diff --git a/frameworks/native/neural_network_runtime/ops/slice_builder.h b/frameworks/native/neural_network_runtime/ops/slice_builder.h index fba1b132900234d992074cf05af5a0f2bd35ae2a..f6f5ee6898646390a85ba3c3d996b6a1b1a7f978 100644 --- a/frameworks/native/neural_network_runtime/ops/slice_builder.h +++ b/frameworks/native/neural_network_runtime/ops/slice_builder.h @@ -33,6 +33,9 @@ public: LiteGraphTensorPtr GetPrimitive() override; +private: + OH_NN_ReturnCode SetAxes(std::shared_ptr tensor); + private: std::vector m_axes; }; diff --git a/frameworks/native/neural_network_runtime/ops/softmax_builder.cpp b/frameworks/native/neural_network_runtime/ops/softmax_builder.cpp index e0835bf71dfa84995bdf065d2d95b7a300bebc71..6b64c66e08473497c9ea5a70afab77500df3d3ab 100644 --- a/frameworks/native/neural_network_runtime/ops/softmax_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/softmax_builder.cpp @@ -22,6 +22,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; 
+static const int PARAM_MAX_NUM = 1; static const std::string OP_NAME = "Softmax"; SoftmaxBuilder::SoftmaxBuilder() {} @@ -72,6 +73,12 @@ OH_NN_ReturnCode SoftmaxBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SoftmaxBuilder] Passed invalid params index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); diff --git a/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.cpp b/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.cpp index 8c0bb7bcb6aa829b537e103f7b3838956ebedae0..6fd66351eba4027cca69c0d477e45bd6686492dd 100644 --- a/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.cpp @@ -22,6 +22,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 2; static const std::string OP_NAME = "SpaceToBatchND"; static const int PADDINGS_DATA_SIZE = 2; static const int VECT_DATA_SIZE = 2; @@ -118,6 +119,12 @@ OH_NN_ReturnCode SpaceToBatchNDBuilder::Build(const std::vector& param m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SpaceToBatchNDBuilder] Passed invalid param index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); diff --git a/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.h b/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.h index 68f5ed7fb4aa7503f7eb50bf013d88e9531dbf6b..4f0d067778ca38f701f2b44bfe4b9da7fd0fd6c0 100644 --- 
a/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.h +++ b/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.h @@ -39,7 +39,7 @@ private: OH_NN_ReturnCode SetPaddings(std::shared_ptr tensor); private: - std::vector> paddings {}; + std::vector> paddings; std::vector block_shape {}; }; } // namespace Ops diff --git a/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.cpp b/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..904f2ad5f58cf720a6a73bd0bb9dc5e0f7190098 --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.cpp @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "space_to_depth_builder.h" + +#include "transform.h" +#include "validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; +static const int SCALAR_LENGTH = 1; +static const std::string OP_NAME = "SpaceToDepth"; + +SpaceToDepthBuilder::SpaceToDepthBuilder() {} + +SpaceToDepthBuilder::~SpaceToDepthBuilder() {} + +OH_NN_ReturnCode SpaceToDepthBuilder::SetBlockSize(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[SpaceToDepth] The blockSize should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[SpaceToDepth] The blockSize should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[SpaceToDepth] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_blockSize = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode SpaceToDepthBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[SpaceToDepth] Build failed, the spaceToDepth operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[SpaceToDepth] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[SpaceToDepth] Build failed, passed invalid param index."); + return ret; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE: + ret = SetBlockSize(tensor); + break; + default: + LOGE("[SpaceToDepth] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (ret != OH_NN_SUCCESS) { + LOGE("[SpaceToDepth] Build failed, passed invalid param."); + return ret; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr SpaceToDepthBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SpaceToDepth] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW}; + + void* primitive = mindspore::lite::MindIR_SpaceToDepth_CreatePrimitive(m_blockSize, format); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(SpaceToDepthBuilder, OH_NN_OPS_SPACE_TO_DEPTH); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.h b/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..dd16dda2dbdab6f9cb3866e4059201868831772e --- 
/dev/null +++ b/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_SPACE_TO_DEPTH_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SPACE_TO_DEPTH_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SpaceToDepthBuilder : public OpsBuilder { +public: + SpaceToDepthBuilder(); + ~SpaceToDepthBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetBlockSize(std::shared_ptr tensor); + +private: + int64_t m_blockSize {0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SPACE_TO_DEPTH_BUILDER_H diff --git a/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.cpp b/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.cpp index 52b5f79cfe64bc1c42740be9a5eee131717a7736..07a398d67694b8b4f763745bf44afd90afb53c3f 100644 --- a/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/sparse_to_dense_builder.cpp @@ -20,6 
+20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 3; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "SparseToDense"; SparseToDenseBuilder::SparseToDenseBuilder() {} @@ -41,16 +42,16 @@ OH_NN_ReturnCode SparseToDenseBuilder::Build(const std::vector& params LOGE("[SparseToDense] Build failed, passed invalid input or output index."); return ret; } - - if (!paramsIndex.empty()) { - LOGW("[SparseToDense] Build failed, the sparseToDense expects no parameters, but receive %zu", \ - paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[SparseToDense] Build failed, passed invalid param index."); + return ret; + } + m_name = OP_NAME; m_isBuild = true; return OH_NN_SUCCESS; diff --git a/frameworks/native/neural_network_runtime/ops/split_builder.cpp b/frameworks/native/neural_network_runtime/ops/split_builder.cpp index 102c936b78ded67d71719cdb88304694a449aabd..ad6091cfff56167fa15873e9ed5a9288fa6731dc 100644 --- a/frameworks/native/neural_network_runtime/ops/split_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/split_builder.cpp @@ -19,6 +19,7 @@ namespace OHOS { namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; +static const int PARAM_MAX_NUM = 3; static const std::string OP_NAME = "Split"; SplitBuilder::SplitBuilder() {} @@ -136,6 +137,12 @@ OH_NN_ReturnCode SplitBuilder::Build(const std::vector ¶msIndex, return returnCode; } + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SplitBuilder] Build failed, passed invalid param index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); diff --git 
a/frameworks/native/neural_network_runtime/ops/sqrt_builder.cpp b/frameworks/native/neural_network_runtime/ops/sqrt_builder.cpp index 378902ff85bb7c89b0c584d3896bd1ea04cf1b66..610dd56051afed8d6374e6be69adba2d71058577 100644 --- a/frameworks/native/neural_network_runtime/ops/sqrt_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/sqrt_builder.cpp @@ -22,6 +22,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Sqrt"; SqrtBuilder::SqrtBuilder() {} @@ -50,14 +51,15 @@ OH_NN_ReturnCode SqrtBuilder::Build(const std::vector& paramsIndex, return returnCode; } - if (!paramsIndex.empty()) { - LOGE("[SqrtBuilder] sqrt expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } - m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SqrtBuilder] Passed invalid param index."); + return returnCode; + } + // The quantization type of the first output determinies that of the operator. 
SetQuantType(outputsIndex, allTensors); m_isBuild = true; diff --git a/frameworks/native/neural_network_runtime/ops/square_builder.cpp b/frameworks/native/neural_network_runtime/ops/square_builder.cpp index 808c45c198e370df0c7d646de20ba141496ce99a..ecdf64cd50e17e2272d10efc10af3d2d44eb3d43 100755 --- a/frameworks/native/neural_network_runtime/ops/square_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/square_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Square"; SquareBuilder::SquareBuilder() {} @@ -41,15 +42,16 @@ OH_NN_ReturnCode SquareBuilder::Build(const std::vector& paramsIndex, LOGE("[Square] Build failed, passed invalid input or output index."); return ret; } - - if (!paramsIndex.empty()) { - LOGW("[Square] Build failed, the square expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Square] Build failed, passed invalid param index."); + return ret; + } + m_name = OP_NAME; m_isBuild = true; return OH_NN_SUCCESS; diff --git a/frameworks/native/neural_network_runtime/ops/squared_difference_builder.cpp b/frameworks/native/neural_network_runtime/ops/squared_difference_builder.cpp index 331b43eca5a9885d07b81e86e71122ee62602e9c..228194430d997b0740324409db7e66b2175acbb6 100644 --- a/frameworks/native/neural_network_runtime/ops/squared_difference_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/squared_difference_builder.cpp @@ -22,6 +22,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "SquaredDifference"; 
SquaredDifferenceBuilder::SquaredDifferenceBuilder() {} @@ -50,14 +51,15 @@ OH_NN_ReturnCode SquaredDifferenceBuilder::Build(const std::vector& pa return returnCode; } - if (!paramsIndex.empty()) { - LOGE("[SquaredDifferenceBuilder] squaredDifference expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } - m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SquaredDifferenceBuilder] Passed invalid param index."); + return returnCode; + } + m_isBuild = true; m_name = OP_NAME; return OH_NN_SUCCESS; diff --git a/frameworks/native/neural_network_runtime/ops/squeeze_builder.cpp b/frameworks/native/neural_network_runtime/ops/squeeze_builder.cpp index c37da63524f0347b09c909fe72139c432a5a3430..61604669e90784f0715895805811d974757cef35 100644 --- a/frameworks/native/neural_network_runtime/ops/squeeze_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/squeeze_builder.cpp @@ -22,6 +22,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const std::string OP_NAME = "Squeeze"; SqueezeBuilder::SqueezeBuilder() {} @@ -76,6 +77,12 @@ OH_NN_ReturnCode SqueezeBuilder::Build(const std::vector ¶msIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SqueezeBuilder] Passed invalid param index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); diff --git a/frameworks/native/neural_network_runtime/ops/stack_builder.cpp b/frameworks/native/neural_network_runtime/ops/stack_builder.cpp index efe7a5cf99a018afa9e7551fcb83d566dbcf0ed3..f3aa2b3924e06aaf98954e46b095c63092977f0a 100644 --- 
a/frameworks/native/neural_network_runtime/ops/stack_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/stack_builder.cpp @@ -22,6 +22,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_MIN_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const std::string OP_NAME = "Stack"; StackBuilder::StackBuilder() {} @@ -78,7 +79,12 @@ OH_NN_ReturnCode StackBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; - OH_NN_ReturnCode returnCode; + auto returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[StackBuilder] Passed invalid param index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); diff --git a/frameworks/native/neural_network_runtime/ops/strided_slice_builder.cpp b/frameworks/native/neural_network_runtime/ops/strided_slice_builder.cpp index 5a5873b3b088a12250a5a9411bc4047b747c506f..bc537549bb6cede454e146271895662c3962870d 100644 --- a/frameworks/native/neural_network_runtime/ops/strided_slice_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/strided_slice_builder.cpp @@ -24,6 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 4; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 5; static const std::string OP_NAME = "StridedSlice"; StridedSliceBuilder::StridedSliceBuilder() {} @@ -153,6 +154,12 @@ OH_NN_ReturnCode StridedSliceBuilder::Build(const std::vector& paramsI return returnCode; } + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[StridedSliceBuilder] Passed invalid param index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); diff --git 
a/frameworks/native/neural_network_runtime/ops/sub_builder.cpp b/frameworks/native/neural_network_runtime/ops/sub_builder.cpp index 85c06d9d2da922081097e8f2437ab548759e19f9..a298235acaaa512f0f5338524acaa199d2c42e8b 100644 --- a/frameworks/native/neural_network_runtime/ops/sub_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/sub_builder.cpp @@ -22,6 +22,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const std::string OP_NAME = "Sub"; SubBuilder::SubBuilder() {} @@ -82,6 +83,12 @@ OH_NN_ReturnCode SubBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SubBuilder] Passed invalid param index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); diff --git a/frameworks/native/neural_network_runtime/ops/sub_builder.h b/frameworks/native/neural_network_runtime/ops/sub_builder.h index 1e65ec88276fce86186e6a52d5d21a5609b89431..6e638a5cdf5c90155a49b079c885b1c8b7399c1f 100644 --- a/frameworks/native/neural_network_runtime/ops/sub_builder.h +++ b/frameworks/native/neural_network_runtime/ops/sub_builder.h @@ -39,7 +39,7 @@ private: OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); private: - mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/swish_builder.cpp b/frameworks/native/neural_network_runtime/ops/swish_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5eb934fe4de9e5eec02211393b2507b1e738d98d --- 
/dev/null +++ b/frameworks/native/neural_network_runtime/ops/swish_builder.cpp @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "swish_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; +static const std::string OP_NAME = "Swish"; + +SwishBuilder::SwishBuilder() {} + +SwishBuilder::~SwishBuilder() {} + +OH_NN_ReturnCode SwishBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Swish] Build failed, the swish operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Swish] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Swish] Build failed, passed invalid param index."); + return ret; + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr SwishBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Swish] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + float alpha {0.0f}; + float minVal {0.0f}; + float maxVal {0.0f}; + bool approximate {false}; + mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_SWISH}; + + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, + minVal, maxVal, approximate); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(SwishBuilder, OH_NN_OPS_SWISH); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/ops/swish_builder.h b/frameworks/native/neural_network_runtime/ops/swish_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..7681906778ed92ec7a37a5492eba4cff84962f6f --- /dev/null +++ b/frameworks/native/neural_network_runtime/ops/swish_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_SWISH_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SWISH_BUILDER_H + +#include "mindir.h" + +#include "ops_builder.h" +#include "ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SwishBuilder : public OpsBuilder { +public: + SwishBuilder(); + ~SwishBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SWISH_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime/ops/tanh_builder.cpp b/frameworks/native/neural_network_runtime/ops/tanh_builder.cpp index c275f004d069484853ec7537e230fa5554bcaf51..39f17dfc62d46f6fe74901ebe4498bc8b51a252f 100644 --- a/frameworks/native/neural_network_runtime/ops/tanh_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/tanh_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const std::string OP_NAME = "Tanh"; TanhBuilder::TanhBuilder() {} @@ -48,14 +49,15 @@ OH_NN_ReturnCode TanhBuilder::Build(const std::vector& paramsIndex, return returnCode; } - if (!paramsIndex.empty()) { - LOGE("[TanhBuilder] TanhBuilder expects no parameters, but receive %zu", 
paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } - m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[TanhBuilder] Passed invalid param index."); + return returnCode; + } + // The quantization type of the first output determinies that of the operator. SetQuantType(outputsIndex, allTensors); diff --git a/frameworks/native/neural_network_runtime/ops/tanh_builder.h b/frameworks/native/neural_network_runtime/ops/tanh_builder.h index dfc6a1e967c3a84c159f37747c12e73413f132f8..5461fe85989cd532696166adb39f17f8374dc71a 100644 --- a/frameworks/native/neural_network_runtime/ops/tanh_builder.h +++ b/frameworks/native/neural_network_runtime/ops/tanh_builder.h @@ -36,7 +36,7 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_TANH}; + mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_TANH}; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/tile_builder.cpp b/frameworks/native/neural_network_runtime/ops/tile_builder.cpp index 8032acc505d40ba9f674704d2170e7aacbd491e9..eb8f07bae44dd89c91a3a2c2127da64ea8066513 100644 --- a/frameworks/native/neural_network_runtime/ops/tile_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/tile_builder.cpp @@ -22,12 +22,38 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const std::string OP_NAME = "Tile"; TileBuilder::TileBuilder() {} TileBuilder::~TileBuilder() {} +OH_NN_ReturnCode TileBuilder::SetDims(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[TileBuilder] The dims should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + m_dims.clear(); + + 
void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[TileBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + int64_t* pDims = static_cast(buffer); + + uint32_t elementCount = tensor->GetElementCount(); + for (uint32_t i = 0; i < elementCount; ++i) { + m_dims.emplace_back(*pDims); + ++pDims; + } + return OH_NN_SUCCESS; +} + /** * Build method. * 1.set attr of ops. @@ -50,9 +76,27 @@ OH_NN_ReturnCode TileBuilder::Build(const std::vector& paramsIndex, return returnCode; } - if (!paramsIndex.empty()) { - LOGE("[TileBuilder] TransposeBuilder expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[TileBuilder] Passed invalid param index."); + return returnCode; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_TILE_DIMS: + returnCode = SetDims(tensor); + break; + default: + LOGE("[TileBuilder] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (returnCode != OH_NN_SUCCESS) { + LOGE("[TileBuilder] Build failed, passed invalid param."); + return returnCode; + } } m_inputsIndex = inputsIndex; diff --git a/frameworks/native/neural_network_runtime/ops/tile_builder.h b/frameworks/native/neural_network_runtime/ops/tile_builder.h index e47e00baeb1790cdd4cb8408b56903373d8bc58c..e504403349c55e5ef761ce351a92b8f903784cfe 100644 --- a/frameworks/native/neural_network_runtime/ops/tile_builder.h +++ b/frameworks/native/neural_network_runtime/ops/tile_builder.h @@ -33,6 +33,9 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; +private: + OH_NN_ReturnCode SetDims(std::shared_ptr tensor); + private: std::vector m_dims {0}; }; diff --git a/frameworks/native/neural_network_runtime/ops/top_k_builder.cpp
b/frameworks/native/neural_network_runtime/ops/top_k_builder.cpp index 673d6f0baff2497072cb8aaf872573be16b953c6..03004033818ba41b2dc682f8c6c746a889925c0e 100644 --- a/frameworks/native/neural_network_runtime/ops/top_k_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/top_k_builder.cpp @@ -23,6 +23,8 @@ namespace Ops { static const std::string OP_NAME = "TopK"; static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 2; +static const int PARAM_MAX_NUM = 2; +static const int SCALAR_LENGTH = 1; TopKBuilder::TopKBuilder() {} @@ -45,6 +47,28 @@ OH_NN_ReturnCode TopKBuilder::SetSorted(std::shared_ptr tensor) return OH_NN_SUCCESS; } +OH_NN_ReturnCode TopKBuilder::SetAxis(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[TopK] The axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[TopK] The axis should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[TopK] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_axis = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + /** * Build method. * 1.build primitive of ops. @@ -70,6 +94,12 @@ OH_NN_ReturnCode TopKBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[TopK] Passed invalid param index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); @@ -77,6 +107,9 @@ OH_NN_ReturnCode TopKBuilder::Build(const std::vector& paramsIndex, case OH_NN_TOP_K_SORTED: returnCode = SetSorted(tensor); break; + case OH_NN_TOP_K_AXIS: + returnCode = SetAxis(tensor); + break; default: LOGE("[TopK] Parameter Type is invalid. 
type=%d", tensor->GetType()); return OH_NN_INVALID_PARAMETER; @@ -100,8 +133,7 @@ LiteGraphPrimitvePtr TopKBuilder::GetPrimitive() return {nullptr, DestroyLiteGraphPrimitive}; } - int64_t axis = 0; - auto primitive = mindspore::lite::MindIR_TopKFusion_CreatePrimitive(m_sorted, axis); + auto primitive = mindspore::lite::MindIR_TopKFusion_CreatePrimitive(m_sorted, m_axis); if (primitive == nullptr) { LOGE("[TopK] MindIR_TopKFusion_CreatePrimitive failed."); return {nullptr, DestroyLiteGraphPrimitive}; diff --git a/frameworks/native/neural_network_runtime/ops/top_k_builder.h b/frameworks/native/neural_network_runtime/ops/top_k_builder.h index 3535f57afd4cdf48814e6fb929b7d4362e1e2fd5..4c8ccf5aa3d6dd94db491de2c5fc6e6f30796ff5 100644 --- a/frameworks/native/neural_network_runtime/ops/top_k_builder.h +++ b/frameworks/native/neural_network_runtime/ops/top_k_builder.h @@ -34,8 +34,10 @@ public: private: OH_NN_ReturnCode SetSorted(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); private: + int64_t m_axis {0}; bool m_sorted {true}; // true means sorting in the descending order. 
}; } // namespace Ops diff --git a/frameworks/native/neural_network_runtime/ops/transpose_builder.cpp b/frameworks/native/neural_network_runtime/ops/transpose_builder.cpp index a89b28207a93531d572c65441ba6dd28eef8cedd..67c08c7fcc991b5d669ce6c71223da475070ad45 100644 --- a/frameworks/native/neural_network_runtime/ops/transpose_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/transpose_builder.cpp @@ -22,6 +22,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 2; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM = 0; static const std::string OP_NAME = "Transpose"; TransposeBuilder::TransposeBuilder() {} @@ -50,14 +51,15 @@ OH_NN_ReturnCode TransposeBuilder::Build(const std::vector& paramsInde return returnCode; } - if (!paramsIndex.empty()) { - LOGE("[TransposeBuilder] TransposeBuilder expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } - m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[TransposeBuilder] Passed invalid param index."); + return returnCode; + } + m_isBuild = true; m_name = OP_NAME; return OH_NN_SUCCESS; diff --git a/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.cpp b/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.cpp index 62b6d86b531901cb301f92ef6f1fac914118e8fa..25d0231fa86bf92188718c6253ff770f2c1aaf7b 100644 --- a/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.cpp @@ -22,6 +22,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const std::string OP_NAME = "Unsqueeze"; UnsqueezeBuilder::UnsqueezeBuilder() {} @@ -71,6 +72,12 @@ OH_NN_ReturnCode UnsqueezeBuilder::Build(const std::vector& paramsInde 
m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[UnsqueezeBuilder] Passed invalid param index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); diff --git a/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp b/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp index e2ee7a2ca1a4b5312d519c8c28934e5fd3d87bbe..4a0aea9ebd14d31bcb7faf852f0740c48e0d0c7f 100755 --- a/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 1; static const int OUTPUT_MIN_NUM = 1; +static const int PARAM_MAX_NUM = 1; static const int SCALAR_LENGTH = 1; static const std::string OP_NAME = "Unstack"; @@ -86,8 +87,13 @@ OH_NN_ReturnCode UnstackBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; - - OH_NN_ReturnCode returnCode; + + auto returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Unstack] Passed invalid param index."); + return returnCode; + } + for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); diff --git a/frameworks/native/neural_network_runtime/ops/where_builder.cpp b/frameworks/native/neural_network_runtime/ops/where_builder.cpp index 4c2fe012a02b4f4015b66f2d16664eec047bd627..13685deeacc406d5b68686b921c6f2e2eb4dcc59 100644 --- a/frameworks/native/neural_network_runtime/ops/where_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/where_builder.cpp @@ -20,6 +20,7 @@ namespace NeuralNetworkRuntime { namespace Ops { static const int INPUT_NUM = 3; static const int OUTPUT_NUM = 1; +static const int PARAM_NUM 
= 0; static const std::string OP_NAME = "Where"; WhereBuilder::WhereBuilder() {} @@ -41,15 +42,16 @@ OH_NN_ReturnCode WhereBuilder::Build(const std::vector& paramsIndex, LOGE("[Where] Build failed, passed invalid input or output index."); return ret; } - - if (!paramsIndex.empty()) { - LOGW("[Where] Build failed, the where expects no parameters, but receive %zu", paramsIndex.size()); - return OH_NN_INVALID_PARAMETER; - } m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; + ret = CheckParamIndex(paramsIndex, allTensors, PARAM_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Where] Build failed, passed invalid param index."); + return ret; + } + m_name = OP_NAME; m_isBuild = true; return OH_NN_SUCCESS; diff --git a/frameworks/native/neural_network_runtime/ops_builder.cpp b/frameworks/native/neural_network_runtime/ops_builder.cpp index 69c860772c1e1f59a90e75bc56484de6f7575a1b..e999b8bc7a22de870c2817862a84c644639f8981 100644 --- a/frameworks/native/neural_network_runtime/ops_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops_builder.cpp @@ -92,6 +92,28 @@ OH_NN_ReturnCode OpsBuilder::CheckIOIndex(const std::vector& inputsInd return OH_NN_SUCCESS; } +OH_NN_ReturnCode OpsBuilder::CheckParamIndex(const std::vector& paramsIndex, + const std::vector>& allTensors, + const size_t paramNum) const +{ + size_t paramsIndexSize = paramsIndex.size(); + if (paramsIndexSize > paramNum) { + LOGE("The number of index of params is %{public}zu larger than %{public}zu.", paramsIndexSize, paramNum); + return OH_NN_INVALID_PARAMETER; + } + + size_t allTensorsSize = allTensors.size(); + bool isParamsOutOfRange = std::any_of(paramsIndex.begin(), paramsIndex.end(), [allTensorsSize](uint32_t index) { + return index >= allTensorsSize; + }); + if (isParamsOutOfRange) { + LOGE("The index of params is out of range."); + return OH_NN_INVALID_PARAMETER; + } + + return OH_NN_SUCCESS; +} + void OpsBuilder::SetQuantType(const std::vector& outputsIndex, const std::vector>& allTensors) { 
diff --git a/frameworks/native/neural_network_runtime/ops_builder.h b/frameworks/native/neural_network_runtime/ops_builder.h index ecc82f0ba1e565036e35c1cee2d26af36c3707be..c42604d9311c37aa7469fa3671e628789d4d0327 100644 --- a/frameworks/native/neural_network_runtime/ops_builder.h +++ b/frameworks/native/neural_network_runtime/ops_builder.h @@ -65,6 +65,9 @@ protected: const std::vector>& allTensors, const size_t inputNum, const size_t outputNum) const; + OH_NN_ReturnCode CheckParamIndex(const std::vector& paramsIndex, + const std::vector>& allTensors, + const size_t paramNum) const; void SetQuantType(const std::vector& outputsIndex, const std::vector>& allTensors); diff --git a/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h b/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h index a60ead7704fc4637d5a882ed8bbfb30a604ea775..3fd00020567608d60ee12b27ad69f44f4840cf6f 100644 --- a/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h +++ b/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h @@ -368,6 +368,9 @@ typedef enum { * padding. Excessive pixels will be discarded. * * activationType is an integer constant which is contained in FuseType. * The specified activation function is called before output. + * * global Whether to do global pooling. + * * roundMode Boundary handling method. When the pool cannot completely cover the input feature map, + * the output feature map is rounded up, 0 means round down, 1 means round up. * * If the input contains the padList parameter: * @@ -387,6 +390,9 @@ typedef enum { * and the nearest neighbor values are used for padding. * * activationType is an integer constant which is contained in FuseType. * The specified activation function is called before output. + * * global Whether to do global pooling. + * * roundMode Boundary handling method. 
When the pool cannot completely cover the input feature map, + * the output feature map is rounded up, 0 means round down, 1 means round up. * * Outputs: * @@ -503,7 +509,7 @@ typedef enum { * * input: input tensor. * * weight: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format. * The value of inChannel must be exactly divided by the value of group. - * + * * * bias: bias of the convolution. It is an array with a length of [outChannel]. * In quantization scenarios, the bias parameter does not require quantization parameters. * The quantization version requires data input of the OH_NN_INT32 type. @@ -523,7 +529,7 @@ typedef enum { * The total padding quantity is calculated horizontally and vertically * and evenly distributed to the top, bottom, left, and right if possible. * Otherwise, the last additional padding will be completed from the bottom and right. - * + * * 1 (valid): The possible maximum height and width of the output will be returned * in case of no padding. The excessive pixels will be discarded. * * group: number of groups in which the input is divided by in_channel. The value is of the @@ -671,7 +677,7 @@ typedef enum { * 0 (same): The height and width of the output are the same as those of the input. The total padding * quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and * right if possible. Otherwise, the last additional padding will be completed from the bottom and right. - * + * * 1 (valid): The possible maximum height and width of the output will be returned in case of no * padding. The excessive pixels will be discarded. * * activationType is an integer constant which is contained in FuseType. @@ -743,7 +749,7 @@ typedef enum { * * Outputs: * - * * output: computing result, which has the same data type and shape of output and input1. + * * output: computing result, which has the same data type and shape of input1 and input2. 
*/ OH_NN_OPS_ELTWISE = 12, @@ -792,12 +798,13 @@ typedef enum { * * * activationType is an integer constant which is contained in FuseType. * The specified activation function is called before output. + * * hasBias Whether to use the bias. * * Outputs: * * * output: computed tensor. * - * If the input contains the axis parameter: + * If the input contains the axis parameter or useAxis parameter: * * Inputs: * @@ -813,6 +820,8 @@ typedef enum { * converted into a 1D tensor for applying the full connection. * * activationType is an integer constant which is contained in FuseType. * The specified activation function is called before output. + * * useAxis Whether to use the axis. + * * hasBias Whether to use the bias. * * Outputs: * @@ -846,7 +855,7 @@ typedef enum { * Outputs: * * * output: n-dimensional Hswish activation value. - * The data type is the same as that of shape and input. + * The data type is the same as that of shape and input. */ OH_NN_OPS_HSWISH = 17, @@ -856,13 +865,13 @@ typedef enum { * * Inputs: * - * * input1, which can be a real number, Boolean value, or tensor whose data type is real number or NN_BOOL. - * * input2, which can be a real number or a Boolean value if input1 is a tensor and must be a tensor - * with the data type of real number or NN_BOOL if input1 is not a tensor. + * * input1, can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL. + * * input2, can be a real number or a Boolean value if input1 is a tensor and must be a tensor + * with the data type of real number or OH_NN_BOOL if input1 is not a tensor. * * Outputs: * - * * A tensor of the data type NN_BOOL. When a quantization model is used, the quantization parameters of the + * * A tensor of the data type OH_NN_BOOL. When a quantization model is used, the quantization parameters of the * output cannot be omitted. However, values of the quantization parameters do not affect the result. 
*/ OH_NN_OPS_LESS_EQUAL = 18, @@ -890,17 +899,17 @@ typedef enum { OH_NN_OPS_MATMUL = 19, /** - * Calculates the maximum of input1 and input2 element-wise. The inputs of input1 and input2 - * comply with the implicit type conversion rules to make the data types consistent. - * * The inputs must be two tensors or one tensor and one scalar. - * When the inputs are two tensors, their data types cannot be both NN_BOOL. + * Calculates the maximum of input1 and input2 element-wise. The inputs of input1\n + * and input2 comply with the implicit type conversion rules to make the data types consistent. + * The inputs must be two tensors or one tensor and one scalar. + * When the inputs are two tensors, their data types cannot be both OH_NN_BOOL. * Their shapes can be broadcast to the same size. * When the inputs are one tensor and one scalar, the scalar must be a constant. * * Inputs: * - * * input1: n-dimensional input tensor of the real number or NN_BOOL type. - * * input2: n-dimensional input tensor of the real number or NN_BOOL type. + * * input1: n-dimensional input tensor of the real number or OH_NN_BOOL type. + * * input2: n-dimensional input tensor of the real number or OH_NN_BOOL type. * * Outputs: * @@ -934,6 +943,9 @@ typedef enum { * no padding. The excessive pixels will be discarded. * * activationType is an integer constant which is contained in FuseType. * The specified activation function is called before output. + * * global Whether to do global pooling. + * * roundMode Boundary handling method. When the pool cannot completely cover the input feature map, + * the output feature map is rounded up, 0 means round down, 1 means round up. * * If the input contains the padList parameter: * @@ -952,6 +964,9 @@ typedef enum { * and the nearest neighbor values are used for padding. * * activationType is an integer constant which is contained in FuseType. * The specified activation function is called before output. + * * global Whether to do global pooling. 
+ * * roundMode Boundary handling method. When the pool cannot completely cover the input feature map, + * the output feature map is rounded up, 0 means round down, 1 means round up. * * Outputs: * @@ -1023,8 +1038,9 @@ typedef enum { * * Parameters: * - * * padValues: value to be added to the pad operation. + * * constantValue: value to be added to the pad operation. * The value is a constant with the same data type as inputX. + * * paddingMode: Padding mode. * * Outputs: * @@ -1037,13 +1053,17 @@ typedef enum { /** * Calculates the y power of each element in input. * The inputs must be two tensors or one tensor and one scalar. - * When the inputs are two tensors, their data types cannot be both NN_BOOL, and their shapes must be the same. + * When the inputs are two tensors, their data types cannot be both OH_NN_BOOL, and their shapes must be the same. * When the inputs are one tensor and one scalar, the scalar must be a constant. * * Inputs: * - * * input: real number, Boolean value, or tensor whose data type is real number or NN_BOOL. - * * y: real number, Boolean value, or tensor whose data type is real number or NN_BOOL. + * * input: real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL. + * * y: real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL. + * + * Parameters: + * * scale: A OH_NN_FLOAT32 scalar that represents the factor of the scale blend. + * * shift: A OH_NN_FLOAT32 scalar that represents the bias of the scale blend. * * Outputs: * @@ -1110,6 +1130,10 @@ typedef enum { * * size: slice length, which is an array of integers greater than or equal to 0. * Assume that a dimension is i and 1<=size[i]<=input.shape[i]-begin[i]. * + * Parameters: + * + * * axes: Dimensions on which the tensor is sliced. + * * Outputs: * * * output: n-dimensional tensor obtained by slicing. 
@@ -1162,7 +1186,7 @@ typedef enum { * output.shape[3] = (w + paddings[1][0] + paddings[1][1]) / blockShape[1] * (h + paddings[0][0] + paddings[0][1]) and (w + paddings[1][0] + paddings[1][1]) is exactly divisible by * (h + paddings[0][0] + paddings[0][1]) and (w + paddings[1][0] + paddings[1][1]). - * + * */ OH_NN_OPS_SPACE_TO_BATCH_ND = 31, @@ -1212,8 +1236,10 @@ typedef enum { * * Inputs: * - * * input1: minuend, which is a tensor of the NN_FLOAT16, NN_FLOAT32, NN_INT32, or NN_BOOL type. - * * input2: subtrahend, which is a tensor of the NN_FLOAT16, NN_FLOAT32, NN_INT32, or NN_BOOL type. + * * input1: minuend, which is a tensor of the OH_NN_FLOAT16, OH_NN_FLOAT32, OH_NN_INT32, + * or OH_NN_BOOL type. + * * input2: subtrahend, which is a tensor of the OH_NN_FLOAT16, OH_NN_FLOAT32, OH_NN_INT32, + * or OH_NN_BOOL type. * * Outputs: * @@ -1353,6 +1379,11 @@ typedef enum { * * multiples: number of times that the input tensor is copied in each dimension. The value is a 1D tensor. * The length m is not less than the number of dimensions, that is, n. * + * Parameters: + * + * * dims A 1D tensor that specifies the number of times that data is copied in each dimension. + * The length m is not less than the number of dimensions of x. + * * Outputs: * * An m-dimensional tensor whose TensorType is the same as that of the input. If input and * multiples have the same length, input and output have the same number of dimensions. @@ -1391,11 +1422,14 @@ typedef enum { * Parameters: * * * keepDims: indicates whether to retain the dimension. The value is a Boolean value. + * * reduceToEnd: boolean value, indicates whether the reduce operation needs to be performed + * until the last axis. + * * coeff: A OH_NN_FLOAT32 scalar that represents the scale factor of the output. * * Outputs: * * * output: m-dimensional output tensor whose data type is the same as that of the input. - * If keepDims is false, m==n. If keepDims is true, mkeepDims is false, mkeepDims is true, m==n. 
*/ OH_NN_OPS_REDUCE_MEAN = 42, @@ -1511,10 +1545,12 @@ typedef enum { * * Parameters: * - * * beginAxis is an NN_INT32 scalar that specifies the axis from which normalization starts. + * * beginAxis: an OH_NN_INT32 scalar that specifies the axis from which normalization starts. * The value range is [1, rank(input)). - * * epsilon is a scalar of NN_FLOAT32. It is a tiny amount in the normalization formula. - * The common value is 1e-7. + * * epsilon: a scalar of OH_NN_FLOAT32. It is a tiny amount in the normalization formula. + * The common value is 0.00001f. + * * beginParamsAxis: an OH_NN_INT32 scalar that specifies the start axis of layer normalization + * of input parameter (gamma, beta). * * Outputs: * @@ -1523,7 +1559,9 @@ typedef enum { OH_NN_OPS_LAYER_NORM = 49, /** - * Calculates the accumulated value for a tensor along the specified dimension. + * Calculates the accumulated value for a tensor along the specified dimension. If keepDims is set to + * false, the number of dimensions is reduced for the input; if keepDims is set to true, + * the number of dimensions is retained. * * Inputs: * @@ -1534,34 +1572,38 @@ typedef enum { * Parameters: * * * keepDims: indicates whether to retain the dimension. The value is a Boolean value. - * When its value is true, the number of output dimensions is the same as that of the input. - * When its value is false, the number of output dimensions is reduced. + * * reduceToEnd: boolean value, indicates whether the reduce operation needs to be performed + * until the last axis. + * * coeff: A OH_NN_FLOAT32 scalar that represents the scale factor of the output. * * Outputs: * * * output: m-dimensional output tensor whose data type is the same as that of the input. - * If keepDims is false, m==n. If keepDims is true, mkeepDims is false, mkeepDims is true, m==n. */ OH_NN_OPS_REDUCE_PROD = 50, /** - * Operates the logical OR in the specified dimension. 
If keepDims is set to false, - * the number of dimensions is reduced for the input; if keepDims is set to true, - * the number of dimensions is retained. + * Calculates the logical sum value for input tensor along the specified dimension. If keepDims is set to + * false, the number of dimensions is reduced for the input; if keepDims is set to true, + * the number of dimensions is retained. * * Inputs: * - * * A n-dimensional input tensor, where n is less than 8. - * * A 1D tensor specifying the dimension used to operate the logical OR. + * * input: n-dimensional input tensor, where n is less than 8. + * * axis: dimension used to calculate the logical sum value. The value is a 1D tensor. * The value range of each element in axis is [–n, n). * * Parameters: * * * keepDims: indicates whether to retain the dimension. The value is a Boolean value. + * * reduceToEnd: boolean value, indicates whether the reduce operation needs to be performed + * until the last axis. + * * coeff: A OH_NN_FLOAT32 scalar that represents the scale factor of the output. * * Outputs: * * output: m-dimensional output tensor whose data type is the same as that of the input. - * If keepDims is false, m==n. If keepDims is true, mkeepDims is false, mkeepDims is true, m==n. */ OH_NN_OPS_REDUCE_ALL = 51, @@ -1576,6 +1618,7 @@ typedef enum { * * * src_t: data type of the input. * * dst_t: data type of the output. + * * axis: dimensional of the input to convert. * * Outputs: * @@ -1595,6 +1638,8 @@ typedef enum { * Parameters: * * * sorted: order of sorting. The value true means descending and false means ascending. + * * axis: A OH_NN_INT32 scalar that specifies the dimension that needs to be sorted, default 0, + * pointing to the last dimension. * * Outputs: * @@ -1613,7 +1658,10 @@ typedef enum { * Parameters: * * * axis: dimension for calculating the index of the maximum. - * * keep_dims: indicates whether to maintain the input tensor dimension. The value is a Boolean value. 
+ * * keepDims: indicates whether to maintain the input tensor dimension. The value is a Boolean value. + * * topK: Whether to keep the output dimensions the same as the input dimensions. + * * outMaxValue: Return the index if the value is false. + * Return the value if the value is true. The default value is false. * * Outputs: * * output: index of the maximum input tensor on the axis. The value is a tensor. @@ -1624,6 +1672,7 @@ * Adds a dimension based on the value of axis. * * Inputs: + * * * input: n-dimensional tensor. * * Parameters: @@ -1632,6 +1681,7 @@ * The value range of the integer is [-n, n). * * Outputs: + * * * output: output tensor. */ OH_NN_OPS_UNSQUEEZE = 55, @@ -1641,31 +1691,39 @@ * output=0.5∗input∗(1+tanh(input/2)) * * Inputs: - * * An n-dimensional input tensor. + * + * * input: An n-dimensional input tensor. + * + * Parameters: + * * approximate: Whether to use the approximation algorithm. * * Outputs: + * + * * output: n-dimensional tensor, with the same data type and shape as the input tensor. */ OH_NN_OPS_GELU = 56, /** - * Unpacks the given dimension of a rank-R tensor into rank-(R-1) tensors. + * Unpacks the input tensors based on the given dimension of axis. * Unpacks tensors from input by chipping it along the axis dimension. * For example, given a tensor of shape (A, B, C, D); * If axis == 0, then the i'th tensor in output is the slice value[i, :, :, :],\n * and each tensor in output will have shape (B, C, D). * If axis == 1, then the i'th tensor in output is the slice value[:, i, :, :],\n * and each tensor in output will have shape (A, C, D). Etc. - * This is the opposite of stack. + * This is the opposite of OH_NN_OPS_STACK. * * Inputs: + * * * input: n-dimensional tensor. * * Parameters: - * * axis: dimension along witch to pack. Default: 0. Negative values wrap around. The range is [-R, R). + * + * * axis: dimension along which to unpack. Default 0. The range is [-n, n). 
* * Outputs: - * * output: A tuple of tensors, the shape of each objects is the same. + * + * * output: A tuple of tensors, the shape of each object is the same. */ OH_NN_OPS_UNSTACK = 57, @@ -1673,10 +1731,13 @@ * Obtains the absolute value of the input tensor. * * Inputs: + * * * input: n-dimensional tensor. * * Outputs: - * * output: The absolute value of the input tensor. + * + * * output: n-dimensional tensor. The absolute value of the input tensor. + * The shape and data type is the same as inputs'. */ OH_NN_OPS_ABS = 58, @@ -1684,77 +1745,87 @@ * Computes the Gauss error function of input element-wise. * * Inputs: - * * input: n-dimensional tensor. + * + * * input: n-dimensional tensor. The dimension should be less than 8, + * and the data type only support OH_NN_FLOAT32 and OH_NN_FLOAT16. * * Outputs: - * * output: A tensor, has the same shape and dtype as the input. + * + * * output: n-dimensional tensor. The shape and data type is the same as inputs'. */ OH_NN_OPS_ERF = 59, /** - * Calculates the exponential of the given input tensor, element-wise. - * ExpFusion computes outputs output = base ^ (shift + scale * input), for base > 0. - * Or if base is set to the default (-1), base is set to e, - * so output = exp(shift + scale * input). + * Calculates the exponential of the given input tensor element-wise. + * ExpFusion computes outputs by formula output = base ^ (shift + scale * input), for base > 0. + * And the base is default set to -1, which means natural logarithm 'e', + * and the calculation formula changes to output = exp(shift + scale * input). * * Inputs: + * * * input: n-dimensional tensor. * * Parameters: - * * base: The base of exponential function, default -1 for a value of e, must be > 0. - * * scale: The amplifcation factor of independent value, default 1. - * * shift: The offset of independent value, default 1. + * + * * base: The base of exponential function. Default set to -1 representing natural logarithm 'e'. 
+ * Input value must be > 0. + * * scale: The amplification factor of exponential value, default 1. + * * shift: The offset of exponential value, default 0. * * Outputs: - * * output: A tensor. The exponential of the input tensor computed element-wise. + * + * * output: n-dimensional tensor. The element-wise exponential result of the input tensor. */ OH_NN_OPS_EXP = 60, /** - * Returns the tensor resulted from performing the less logical operation elementwise\n - * on the input tensors input1 and input2. + * For input1 and input2, calculate the result of input1[i] < input2[i] for each pair of elements, + * where i is the index of each element in the input tensor. * * Inputs: - * * input1: n-dimensional tensor. - * The first input is a number or a bool or a tensor whose data type is number or bool. - * * input2: n-dimensional tensor. The second input is a number or a bool - * when the first input is a tensor or a tensor whose data type is number or bool. + * + * * input1: can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL. + * * input2: can be a real number or a Boolean value if input1 is a tensor and must be a tensor + * with the data type of real number or OH_NN_BOOL if input1 is not a tensor. * * Outputs: - * * output: A tensor, the shape is the same as the one after broadcasting, and the data type is bool. + * + * * output: A tensor of the data type OH_NN_BOOL. When a quantization model is used, the quantization + * parameters of the output cannot be omitted. However, values of the quantization parameters do not + * affect the result. */ OH_NN_OPS_LESS = 61, /** - * Selects elements from input1 or input2, depending on condition. - * The input1 and input2 tensors must all have the same shape, - * and the output will also have that shape. - * The condition tensor must be a scalar if input1 and input2 are scalars. 
- * If input1 and input2 are vectors or higher rank, then condition must be either a scalar, - * a vector with size matching the first dimension of input1, or must have the same shape as input1. - * The condition tensor acts as a mask that chooses, based on the value at each element, - * whether the corresponding element / row in the output should be taken from input1 (if true) or input2 (if false). + * Selects output elements from input1 or input2, depending on condition. + * If condition is true, choose elements from input1. Otherwise, choose elements from input2 if condition is false. + * The three inputs, condition , input1 and input2 must share the same shape. * * Inputs: - * * inputCond: n-dimensional tensor or scalar. - * The condition tensor, decides which element is chosen. - * * input1: n-dimensional tensor. a tensor which may have the same shape as condition. - * If condition is rank 1, x1 may have higher rank, but its first dimension must match the size of condition. - * * input2: n-dimensional tensor, has the same shape with input1. + * + * * condition: n-dimensional tensor or scalar. + * The condition tensor, decides which element is chosen. + * * input1: n-dimensional tensor. First input tensor to be chosen. + * If condition is rank 1, input1 may have higher rank, but its first dimension must match the + * size of condition. + * * input2: n-dimensional tensor. Second input tensor to be chosen. * * Outputs: - * * output: A tensor, has the same shape as the input_cond. + * + * * output: A tensor, has the same shape and data type as the input. */ OH_NN_OPS_SELECT = 62, /** - * Calculates the square of a tensor. + * Calculates the square of input tensor element-wise. * * Inputs: + * * * input: n-dimensional tensor. * * Outputs: - * * output: A tensor, has the same shape and dtype as the input. + * + * * output: n-dimensional tensor, has the same shape and dtype as the input. 
*/ OH_NN_OPS_SQUARE = 63, @@ -1763,19 +1834,19 @@ typedef enum { * then the output will have shape (d_0 X d_1 … d_(axis-1), d_axis X d_(axis+1) … X dn). * * Inputs: - * * input: n-dimensional tensor. rank >= axis. + * + * * input: n-dimensional tensor. The rank of input should be greater or equal to axis. * * Parameters: - * * axis: Indicate up to which input dimensions (exclusive) should be flattened - * to the outer dimension of the output. - * The value for axis must be in the range [-r, r], where r is the rank of the input tensor. - * Negative value means counting dimensions from the back. When axis = 0, the shape of the output tensor is\n - * (1, (d_0 X d_1 … d_n)), where the shape of the input tensor is (d_0, d_1, … d_n). + * + * * axis: Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension + * of the output. The value for axis must be in the range [-r, r], where r is the rank of the input tensor. + * Negative value means counting dimensions from the back. When axis = 0, the shape of the output tensor is + * (1, (d_0 X d_1 … d_n)), where the shape of the input tensor is (d_0, d_1, … d_n). * * Outputs: - * * output: A tensor, with the contents of the input tensor, - * with input dimensions up to axis flattened to the outer dimension of - * the output and remaining input dimensions flattened into the inner dimension of the output. + * + * * output: 2-dimensional tensor after flattened. */ OH_NN_OPS_FLATTEN = 64, @@ -1787,17 +1858,19 @@ typedef enum { * in the following order: depth, column, and then row. * * Inputs: - * * input: 4-dimensional tensor.. with specific format of NHWC or NCHW. - * where N is the batch axis, H is the height, W is the width and C is the channel or depth. + * + * * input: 4-dimensional tensor with specific format of NHWC or NCHW. + * where N is the batch axis, H is the height, W is the width and C is the channel or depth. 
* * Parameters: + * * * blockSize: Blocks of [blocksize, blocksize] are moved. - * * format: Format of input tensor, default NCHW. * * mode: DCR (default) for depth-column-row order re-arrangement. Use CRD for column-row-depth order. * * Outputs: + * * * output: Output tensor of [N, H * blocksize, W * blocksize, C/(blocksize * blocksize)] for NHWC format - * or [N, C/(blocksize * blocksize), H * blocksize, W * blocksize] for NCHW format. + * or [N, C/(blocksize * blocksize), H * blocksize, W * blocksize] for NCHW format. */ OH_NN_OPS_DEPTH_TO_SPACE = 65, @@ -1806,36 +1879,40 @@ typedef enum { * and extends by increments of delta up to limit. * * Inputs: + * * * input: n-dimensional tensor. * * Parameters: - * * dType: Reserved dataType parameter. + * * * start: Scalar. First entry for the range of output values. * * limit: Scalar. Exclusive upper limit for the range of output values. * * delta: Scalar. Value to step by. * * Outputs: - * * output: A 1-dimensional tensor with specific data type containing generated range of values. + * + * * output: 1-dimensional tensor with specific data type containing generated range of values. */ OH_NN_OPS_RANGE = 66, /** - * Carries out instance normalization as formula y = scale * (x - mean) / sqrt(variance + epsilon) + B, - * where mean and variance are computed per instance per channel. + * Normalize each channel of the input. Make the mean of each channel of the input is 0 and the variance is 1. * * Inputs: - * * input: A 4-dimensional tensor(B, C, H, W).input data tensor from the previous operator; - * dimensions for image case are (N x C x H x W), where N is the batch size, - * C is the number of channels, and H and W are the height and the width of the data. - * For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. 
+ * + * * input: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), + * where N is the batch size, C is the number of channels, and H and W are the height and the width of + * the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the + * batch size. * * scale: The input 1-dimensional scale tensor of channel size. * * bias: The input 1-dimensional bias tensor of channel size. * * Parameters: - * epsilon: The epsilon value to use to avoid division by zero. + * + * * epsilon: The epsilon value to use to avoid division by zero. * * Outputs: - * * output: A tensor, has the same shape as the input. + * + * * output: The output tensor of the same shape as input. */ OH_NN_OPS_INSTANCE_NORM = 67, @@ -1843,14 +1920,17 @@ typedef enum { * Generate a tensor with given value and shape. * * Inputs: - * * input: n-dimensional tensor.Indicates the shape of the expected output tensor. - * If empty tensor is given, the output would be a scalar. All values must be >= 0. + * + * * input: 1-dimensional tensor. Indicates the shape of the expected output tensor. + * All values must be >= 0. * * Parameters: - * * dataType: The data_type of the output tensor. - * * value: The value of the output elements. Should be a one-element tensor. + * + * * dataType: The data type of the output tensor. + * * value: The value of the output elements. * * Outputs: + * * * output: A tensor, has the same shape as the input. */ OH_NN_OPS_CONSTANT_OF_SHAPE = 68, @@ -1859,65 +1939,85 @@ typedef enum { * Broadcast a tensor for a compatiable shape. * * Inputs: + * * * input: n-dimensional tensor. * * Parameters: + * * * shape: A 1-dimensional Tensor, the shape of the desired output. * * Outputs: + * * * output: A tensor after broadcasted. */ OH_NN_OPS_BROADCAST_TO = 69, /** - * Returns the tensor resulted from performing the equal logical operation elementwise\n - * on the input tensors input1 and input2. 
+ * For input1 and input2, calculate the result of input1[i] = input2[i] for each pair of elements, + * where i is the index of each element in the input tensor. * * Inputs: - * * input1, the first input operand. - * * input2, the second input operand. + * + * * input1, can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL. + * * input2, can be a real number or a Boolean value if input1 is a tensor and must be a tensor + * with the data type of real number or OH_NN_BOOL if input1 is not a tensor. * * Outputs: - * * output: A tensor. + * + * * output: A tensor of the data type OH_NN_BOOL. When a quantization model is used, + * the quantization output cannot be omitted. However, values of the quantization + * parameters do not affect the result. */ OH_NN_OPS_EQUAL = 70, /** - * Returns the tensor resulted from performing the greater logical operation elementwise\n - * on the input tensors input1 and input2. + * For input1 and input2, calculate the result of input1[i] > input2[i] for each pair of elements, + * where i is the index of each element in the input tensor. * * Inputs: - * * input1: the first input operand. - * * input2: the second input operand. + * + * * input1, can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL. + * * input2, can be a real number or a Boolean value if input1 is a tensor and must be a tensor + * with the data type of real number or OH_NN_BOOL if input1 is not a tensor. * * Outputs: - * * output: A tensor. + * + * * A tensor of the data type OH_NN_BOOL. When a quantization model is used, the quantization parameters of the + * output cannot be omitted. However, values of the quantization parameters do not affect the result. */ OH_NN_OPS_GREATER = 71, /** - * Returns the tensor resulted from performing the not_equal logical operation elementwise\n - * on the input tensors input1 and input2. 
+ * For input1 and input2, calculate the result of input1[i] != input2[i] for each pair of elements, + * where i is the index of each element in the input tensor. * * Inputs: - * * input1: the first input operand. - * * input2: the second input operand. + * + * * input1, can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL. + * * input2, can be a real number or a Boolean value if input1 is a tensor and must be a tensor + * with the data type of real number or OH_NN_BOOL if input1 is not a tensor. * * Outputs: - * * output: A tensor. + * + * * A tensor of the data type OH_NN_BOOL. When a quantization model is used, the quantization parameters of the + * output cannot be omitted. However, values of the quantization parameters do not affect the result. */ OH_NN_OPS_NOT_EQUAL = 72, /** - * Returns the tensor resulted from performing the greater_equal logical operation elementwise\n - * on the input tensors input1 and input2. + * For input1 and input2, calculate the result of input1[i] >= input2[i] for each pair of elements, + * where i is the index of each element in the input tensor. * * Inputs: - * * input1: the first input operand. - * * input2: the second input operand. + * + * * input1, can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL. + * * input2, can be a real number or a Boolean value if input1 is a tensor and must be a tensor + * with the data type of real number or OH_NN_BOOL if input1 is not a tensor. * * Outputs: - * * output: A tensor. + * + * * A tensor of the data type OH_NN_BOOL. When a quantization model is used, the quantization parameters of the + * output cannot be omitted. However, values of the quantization parameters do not affect the result. */ OH_NN_OPS_GREATER_EQUAL = 73, @@ -1927,12 +2027,15 @@ typedef enum { * is applied to the data tensor elementwise. * * Inputs: + * * * input: n-dimensional input tensor. 
* * Parameters: - * * negative_slope: Coefficient of leakage. + * + * * negativeSlope: Coefficient of leakage. * * Outputs: + * * * output: A tensor, with the same data type and shape as the input tensor. */ OH_NN_OPS_LEAKY_RELU = 74, @@ -1941,17 +2044,19 @@ typedef enum { * Computes an one-layer LSTM. This operator is usually supported via some custom implementation. * * Inputs: - * * input: n-dimensional tensor, shape is [seq_len, batch_size, input_size]. + * + * * input: n-dimensional tensor, shape is [seqLen, batchSize, inputSize]. * * wIh: Weight tensor of input-layer to hidden-layer, - * shape is [num_directions* num_layers, 4 * hidden_size, input_size]. + * shape is [numDirections* numLayers, 4 * hiddenSize, inputSize]. * * wHh: Weight tensor of hidden-layer to hidden-layer, - * shape is [num_directions* num_layers, 4 * hidden_size, hidden_size]. + * shape is [numDirections* numLayers, 4 * hiddenSize, hiddenSize]. * * bias: Bias tensor of input-layer and hidden-layer to hidden-layer, - * shape is [num_directions* num_layers, 8 * hidden_size]. - * * hx: Init state of hidden-layer, shape is [num_directions * num_layers, batch_size, hidden_size]. - * * cx: Init state of cell, shape is [num_directions * num_layers, batch_size, hidden_size]. + * shape is [numDirections* numLayers, 8 * hiddenSize]. + * * hx: Init state of hidden-layer, shape is [numDirections * numLayers, batchSize, hiddenSize]. + * * cx: Init state of cell, shape is [numDirections * numLayers, batchSize, hiddenSize]. * * Parameters: + * * * bidirectional: Whether the LSTM operation is bidirectional. * * hasBias: Whether the operation contains bias. * * inputSize: Size of input tensor. @@ -1961,15 +2066,16 @@ typedef enum { * * dropout: Dropout probalility of each layer except first-layer. * * zoneoutCell: Probalility that the cell state retains the previous state. Default: 0. * * zoneoutHidden: Probalility that the hidden state retains the previous state. Default: 0. 
- * * projSize: If proj_size > 0, will use LSTM with projections of corresponding size. Default: 0. + * * projSize: If projSize > 0, will use LSTM with projections of corresponding size. Default: 0. * * Outputs: + * * * output: A tensor that concats all the intermediate output tensor of the hidden, - * shape is [seq_len, batch_size, num_directions * real_hidden_size]. + * shape is [seqLen, batchSize, numDirections * realHiddenSize]. * * hy: The last output tensor of the hidden-layer, - * shape is [num_directions * num_layers, batch_size, real_hidden_size]. + * shape is [numDirections * numLayers, batchSize, realHiddenSize]. * * cy: The last output tensor of the cell, - * shape is [num_directions * num_layers, batch_size, hidden_size]. + * shape is [numDirections * numLayers, batchSize, hiddenSize]. */ OH_NN_OPS_LSTM = 75, @@ -1978,13 +2084,16 @@ typedef enum { * Any values less than min are set to min. Any values greater than max are set to max. * * Inputs: + * * * input: n-dimensional tensor. * * Parameters: + * * * max: Maximum value, above which element is replaced by max. It must be a scalar(tensor of empty shape). * * min: Minimum value, under which element is replaced by min. It must be a scalar(tensor of empty shape). * * Outputs: + * * * output: n-dimensional tensor., with the same data type and shape as the input tensor. */ OH_NN_OPS_CLIP = 76, @@ -1994,15 +2103,18 @@ typedef enum { * where each element is 'True' if corresponding element in the input tensor is non-zero, and 'False' otherwise. * * Inputs: + * * * input: n-dimensional tensor of shape (N,*), - * where * indicates any number of additional dimensions. + * where * indicates any number of additional dimensions. * * aixs: scalar or tensor, indices the dimension to be computed. * * Parameters: - * * keep_dims: Whether to keep dimension info. + * + * * keepDims: Whether to keep dimension info. * * Outputs: - * * output: Indices or values before the maximum input tensor on the axis. 
+ * + * * output: 1-dimension or n-dimension tensor with boolean data type. */ OH_NN_OPS_ALL = 77, @@ -2012,14 +2124,17 @@ typedef enum { * Summerize determines how many entries of the tensors to print. * * Inputs: - * * data: The tensors to print out when condition is false. + * * * condition: The condition to evalute. + * * data: The tensors to print out when condition is false. * * Parameters: - * * maxsummarize: Print this many entries of each tensor. + * + * * summarize: The number of entries for each tensor is printed. * * Outputs: - * * output: Tensor after average pooling. + * + * * output: Result value judged by condition. If the condition is not true, an Error is returned. */ OH_NN_OPS_ASSERT = 78, @@ -2027,9 +2142,11 @@ typedef enum { * Calculates the cosine of the given input tensor, element-wise. * * Inputs: + * * * input: n-dimensional tensor. * * Outputs: + * * * output: n-dimensional tensor. The cosine of the input tensor computed element-wise. */ OH_NN_OPS_COS = 79, @@ -2038,62 +2155,77 @@ typedef enum { * Calculates the result of nature logarithm of the input. * * Inputs: + * * * input: n-dimensional tensor. The value must be greater than 0. * * Outputs: + * * * output: n-dimensional tensor with the same shape as the input tensor. */ OH_NN_OPS_LOG = 80, /** - * Calculates the truth value of input0 and input1 element-wise. + * Calculates the logical value of input1 and input2 element-wise. * * Inputs: - * * input0: Tensor of type boolean or convert to boolean implicitly. + * * * input1: Tensor of type boolean or convert to boolean implicitly. + * * input2: Tensor of type boolean or convert to boolean implicitly. * * Outputs: - * * output: A tensor of type bool with the shape that x1 and x2 broadcast to. + * + * * output: n-dimensional tensor. The calculation result of logical-and + * and the numeric type is OH_NN_BOOL. */ OH_NN_OPS_LOGICAL_AND = 81, /** - * Calculates the truth value of NOT x element-wise. 
+ * Calculates the logical value of NOT input element-wise. * * Inputs: + * * * input: Tensor of type boolean or convert to boolean implicitly. * * Outputs: - * * output: A tensor of type bool with the shape of input. + * + * * output: n-dimensional tensor. The calculation result of logical-not + * and the numeric type is OH_NN_BOOL. */ OH_NN_OPS_LOGICAL_NOT = 82, /** * Computes the remainder of dividing the first input tensor by the second input tensor element-wise. - * Inputs of x and y comply with the implicit type conversion rules to make the data types consistent. + * Inputs of input1 and input2 comply with the implicit type conversion rules to make the data types consistent. * The inputs must be two tensors or one tensor and one scalar. When the inputs are two tensors, * both dtypes cannot be bool, and the shapes of them could be broadcast. * When the inputs are one tensor and one scalar, the scalar could only be a constant. * * Inputs: - * * input0: A number, a bool or a tensor whose data type is number. - * * input1:if input0 is a tensor, input1 could be a number, a bool or a tensor whose data type is number. - * If input0 is a number or a bool, input1 must be a tensor whose data type is number. + * + * * input1: The remainder of the scalar or tensor, numeric or OH_NN_BOOL type, + * or the n-dimensional tensor of the numeric dimension numeric type. + * * input2: Remainder factor. When the first input is an n-dimensional tensor, + * the second input can be a numeric tensor, a OH_NN_BOOL type, or an n-dimensional + * tensor of a numeric type dimension, and when the first input is a numeric or OH_NN_BOOL tensor, + * the second input must be a tensor of the numeric dimension of the data type. * * Outputs: - * * output: The shape is the same shape as the boradcast shape. The data type is the type with - * the higher precision or the highest data type between the two inputs. + * + * * output: n-dimensional tensor. 
The shape is the same as the input after broadcasting, + * and the data type is the data type with the highest accuracy of the two inputs. */ OH_NN_OPS_MOD = 83, /** - * Returns a tensor with negative values of the input tensor element-wise. + * Calculate the opposite value of the input tensor element-wise. * * Inputs: - * * input: A tensor of the int or float type. + * + * * input: n-dimensional tensor with numeric data type. * * Outputs: - * * output: A tensor with the same shape as the input tensor. + * + * * output: n-dimensional tensor with the same shape and data type as the input tensor. */ OH_NN_OPS_NEG = 84, @@ -2101,10 +2233,12 @@ typedef enum { * Calculate reciprocal of a tensor element-wise. * * Inputs: - * * input: Input tensor. + * + * * input: n-dimensional tensor. * * Outputs: - * * output: A tensor with the same shape as the input tensor. + * + * * output: n-dimensional tensor with the same shape and data type as the input tensor. */ OH_NN_OPS_RECIPROCAL = 85, @@ -2112,54 +2246,370 @@ typedef enum { * Calculate sine of the input element-wise. * * Inputs: - * * input: Input tensor. + * + * * input: n-dimensional tensor. * * Outputs: - * * output: A tensor with the same data type and shape as the input tensor. + * + * * output: n-dimensional tensor with the same data type and shape as the input tensor. */ OH_NN_OPS_SIN = 86, /** - * Selects elements from x1 or x2 based on condition and returns a tensor. + * Selects elements from input1 or input2 based on condition and returns a tensor. * * Inputs: - * * input_cond: n-dimensional tensor or scalar. - * The condition tensor, decides which element is chosen. - * * input1: n-dimensional tensor. If condition is rank 1, - * x1 may have higher rank, but its first dimension must match the size of condition. - * * input2: n-dimensional tensor. + * + * * condition: n-dimensional tensor or scalar. Judging conditions. 
If the OH_NN_BOOL element + is True, then the element corresponding to the position of input1 is selected, and if the OH_NN_BOOL + element is False, the element corresponding to the position of input2 is selected. + * input1: n-dimensional tensor. First tensor to be chosen. + * input2: n-dimensional tensor. Second tensor to be chosen. * * Outputs: - * * output: A tensor, has the same shape as the input_cond. + * + * * output: n-dimensional tensor with the same shape and data type as the input1 and input2. */ OH_NN_OPS_WHERE = 87, /** - * Converts a sparse representation into a dense tensor. + * Converts a sparse tensor into a dense tensor. * * Inputs: + * * * indices: 2-dimensional tensor. Position of an ellement in a sparse tensor. - * Each element value must be non-negative. The shape is (N, 2). + * Each element value must be non-negative. The shape is (N, 2). * * values: 1-dimensional tensor. The value corresponding to the location of indices. The shape is (N). * * sparseShape: 2-dimensional tensor. The shape of a sparse tensor. The value consists of - * two positive integers, indicating that the shape of the sparse tensor is (N, C). + * two positive integers, indicating that the shape of the sparse tensor is (N, C). * * Outputs: + * * * output: A tensor. The data type is the same as values, and the shape is specified by sparseShape. */ OH_NN_OPS_SPARSE_TO_DENSE = 88, /** - * Calculates the truth value of input0 or input1 element-wise. + * Calculates the logical value of input1 or input2 element-wise. * * Inputs: - * * input0: Tensor of type boolean or convert to boolean implicitly. + * * * input1: Tensor of type boolean or convert to boolean implicitly. + * * input2: Tensor of type boolean or convert to boolean implicitly. * * Outputs: - * * output: A tensor of type bool with the shape that input0 and input1 broadcast to. + * + * * output: n-dimensional tensor. The calculation result of logical-or + * and the numeric type is OH_NN_BOOL. 
 */ OH_NN_OPS_LOGICAL_OR = 89, + + /** + * Returns element-wise smallest integer not less than input. + * + * Inputs: + * + * * input: n-dimensional tensor. + * + * Outputs: + * + * * output: A tensor after ceiled. + */ + OH_NN_OPS_CEIL = 90, + + /** + * Crop given tensor according to axis and offset. + * + * Inputs: + * + * * input: n-dimensional tensor. + * * shape: 1-dimensional tensor, indices cropped windows dimension. + * + * Parameters: + * + * * axis: Cropped dimension. + * * offset: Cropped offset per dimension. + * + * Outputs: + * + * * output: Cropped output tensor. + */ + OH_NN_OPS_CROP = 91, + + /** + * The output of the object detection model is post-processed, including decoding the bounding box, + * class probability and score of the model output, and then performing non-maximum suppression (NMS) + * to remove the overlapping bounding box, and finally outputting the detection result. + * + * Inputs: + * + * * bbox: Boxes to be predicted. + * * scores: Scores of all boxes. + * * anchors: Information of boxes, includes box, variance and coordinates. + * + * Parameters: + * * input_size: The size of the input tensor. + * * scale: The scaling factor used to convert the output from + * the normalized form to the original image coordinates. + * * nmsIoUThreshold: The threshold of overlapping region during NMS. + * * nmsScoreThreshold: The score threshold used to select target bbox during NMS. + * * maxDetections: Maximum of bboxes per image. + * * detectionsPerClass: Maximum of bboxes per class. + * * maxClassesPerDetection: Maximum of reserved classes per bboxes. + * * numClasses: Number of target classes to be detected. + * * useRegularNms: Whether use NMS based on IoU threshold. + * * outQuantized: Whether need to quantize. + * + * Outputs: + * + * * bboxes: The coordinates of target detected bboxes. + * * classes: The target class index of target detected bboxes. + * * confidences: The score of target detected bboxes. 
+ * * numDetections: The number of target detected bboxes. + */ + OH_NN_OPS_DETECTION_POST_PROCESS = 92, + + /** + * Returns element-wise largest integer not greater than x. + * + * Inputs: + * + * * input: n-dimensional tensor. + * + * Outputs: + * + * * output: A tensor after floored. + */ + OH_NN_OPS_FLOOR = 93, + + /** + * Calculate the L2-normalize of the input using the given axis. + * + * Inputs: + * + * * input: Input to compute the L2-normalization. + * + * Parameters: + * + * * axis: The axis on which to apply normalization, -1 means last axis, default: 0. + * * epsilon: Value added for numerical stability. default: 1e-6; + * * activationType: Activation function type. + * + * Outputs: + * + * * output: Result tensor with the same type and shape as input. + */ + OH_NN_OPS_L2_NORMALIZE = 94, + + /** + * Computes the log-softmax function to n-dimensional input tensor. + * The input is transformed by the Softmax function and then by the log function to lie in range[-inf,0). + * + * Inputs: + * + * * input: n-dimensional tensor. + * + * Parameters: + * + * * axis: The axis to apply LogSoftmax operation, -1 means the last dimension. + * + * Outputs: + * + * * output: Tensor output. Has the same data type and shape as input. + */ + OH_NN_OPS_LOG_SOFTMAX = 95, + + /** + * Normalize over local input regions. + * + * Inputs: + * + * * input: n-dimensional tensor. + * + * Parameters: + * + * * depthRadius: Half-width of the 1-dimension normalization window. + * * bias: Offset. + * * alpha: Scale factor. + * * beta: Exponent. + * * normRegion: Specifies normalization region. Options: "ACROSS_CHNNEL". + * + * Outputs: + * + * * output: Result output tensor. + */ + OH_NN_OPS_LRN = 96, + + /** + * Calculates the minimum of input1 and input2 element-wise. The inputs of input1 and + * input2 comply with the implicit type conversion rules to make the data types are consistent. + * + * The input must be two tensors or one tensor and one scalar. 
When the input is two tensors, the data types + * cannot be Boolean at the same time, and their shapes can be broadcast to the same size. When the inputs are + * one tensor and one scalar, the scalar must be a constant. + * + * Inputs: + * + * * input1: n-dimensional tensor, whose data type can be number or Boolean. + * * input2: n-dimensional tensor, whose data type can be number or Boolean. + * + * Outputs: + * + * * output: Minimum value of the elements of the two tensors. + */ + OH_NN_OPS_MINIMUM = 97, + + /** + * Calculate the rank of a tensor. + * The rank of a tensor is the number of indices required to uniquely select each element of the tensor. + * + * Inputs: + * + * * input: n-dimensional tensor. + * + * Outputs: + * + * * output: Result tensor. 0-D int32 Tensor representing the rank of input. + */ + OH_NN_OPS_RANK = 98, + + /** + * Calculates the maximum value for input tensor along the specified dimension. If keepDims is set to + * false, the number of dimensions is reduced for the input; if keepDims is set to true, + * the number of dimensions is retained. + * + * Inputs: + * + * * input: n-dimensional input tensor, where n is less than 8. + * * axis: dimension used to calculate the maximum value. The value is a 1D tensor. + * The value range of each element in axis is [-n, n). + * + * Parameters: + * + * * keepDims: indicates whether to retain the dimension. The value is a Boolean value. + * * reduceToEnd: boolean value, indicates whether the reduce operation needs to be performed + * until the last axis. + * * coeff: A OH_NN_FLOAT32 scalar that represents the scale factor of the output. + * + * Outputs: + * + * * output: m-dimensional output tensor whose data type is the same as that of the input. + * If keepDims is false, m<n; if keepDims is true, m==n. + */ + OH_NN_OPS_REDUCE_MAX = 99, + + /** + * Calculates the minimum value for input tensor along the specified dimension. 
If keepDims is set to + * false, the number of dimensions is reduced for the input; if keepDims is set to true, + * the number of dimensions is retained. + * + * Inputs: + * + * * input: n-dimensional input tensor, where n is less than 8. + * * axis: dimension used to calculate the minimum value. The value is a 1D tensor. + * The value range of each element in axis is [-n, n). + * + * Parameters: + * + * * keepDims: indicates whether to retain the dimension. The value is a Boolean value. + * * reduceToEnd: boolean value, indicates whether the reduce operation needs to be performed + * until the last axis. + * * coeff: A OH_NN_FLOAT32 scalar that represents the scale factor of the output. + * + * Outputs: + * + * * output: m-dimensional output tensor whose data type is the same as that of the input. + * If keepDims is false, m<n; if keepDims is true, m==n. + */ + OH_NN_OPS_REDUCE_MIN = 100, + + /** + * Calculates the numerical sum value for input tensor along the specified dimension. If keepDims is set to + * false, the number of dimensions is reduced for the input; if keepDims is set to true, + * the number of dimensions is retained. + * + * Inputs: + * + * * input: n-dimensional input tensor, where n is less than 8. + * * axis: dimension used to calculate the sum value. The value is a 1D tensor. + * The value range of each element in axis is [-n, n). + * + * Parameters: + * + * * keepDims: indicates whether to retain the dimension. The value is a Boolean value. + * * reduceToEnd: boolean value, indicates whether the reduce operation needs to be performed + * until the last axis. + * * coeff: A OH_NN_FLOAT32 scalar that represents the scale factor of the output. + * + * Outputs: + * + * * output: m-dimensional output tensor whose data type is the same as that of the input. + * If keepDims is false, m<n; if keepDims is true, m==n. + */ + OH_NN_OPS_REDUCE_SUM = 101, + + /** + * Calculate half to even of a tensor element-wise. + * + * Inputs: + * + * * input: n-dimensional tensor. 
+ * + * Outputs: + * + * * output: Result tensor with the same shape as the input. + */ + OH_NN_OPS_ROUND = 102, + + /** + * Scatters a tensor into a new tensor depending on the specified indices. + * + * Inputs: + * + * * indices: The index of scattering in the new tensor with int32 or int64 data type. + * The rank of indices must be at least 2 and indices_shape[-1] <= len(shape). + * * updates: The source tensor to be scattered. It has shape indices_shape[:-1]+shape[indices_shape[-1]:]. + * * shape: The shape of the output tensor, has the same data type as indices. + * + * Outputs: + * + * * output: Result tensor with the same type as update and the same shape as shape. + */ + OH_NN_OPS_SCATTER_ND = 103, + + /** + * Rearrange blocks of spatial data into depth. + * The output tensor’s height dimension is height / blocksize; + * The output tensor’s weight dimension is weight / blocksize; + * The depth of output tensor is blocksize * blocksize * inputDepth; + * The input tensor’s height and width must be divisible by blocksize. + * + * Inputs: + * + * * input: 4-dimensional tensor. + * + * Parameters: + * + * * blocksize: The block size used to divide spatial data. It must be >= 2. + * + * Outputs: + * + * * output: Result tensor with the same data_type as the input. + */ + OH_NN_OPS_SPACE_TO_DEPTH = 104, + + /** + * Swish activation function + * + * Inputs: + * + * * input: n-dimensional tensor. + * + * Outputs: + * + * * output: Output tensor. + */ + OH_NN_OPS_SWISH = 105, } OH_NN_OperationType; /** @@ -2184,18 +2634,18 @@ typedef enum { * of the Add operator. */ OH_NN_ADD_ACTIVATIONTYPE = 1, - /** This enumerated value is used when the tensor is used as the kernel_size parameter + /** This enumerated value is used when the tensor is used as the kernelSize parameter * of the AvgPool operator. */ OH_NN_AVG_POOL_KERNEL_SIZE = 2, /** This enumerated value is used when the tensor is used as the stride parameter * of the AvgPool operator. 
*/ OH_NN_AVG_POOL_STRIDE = 3, - /** This enumerated value is used when the tensor is used as the pad_mode parameter + /** This enumerated value is used when the tensor is used as the padMode parameter * of the AvgPool operator. */ OH_NN_AVG_POOL_PAD_MODE = 4, /** This enumerated value is used when the tensor is used as the pad parameter of the AvgPool operator. */ OH_NN_AVG_POOL_PAD = 5, - /** This enumerated value is used when the tensor is used as the activation_type parameter + /** This enumerated value is used when the tensor is used as the activationType parameter * of the AvgPool operator. */ OH_NN_AVG_POOL_ACTIVATION_TYPE = 6, @@ -2292,18 +2742,18 @@ typedef enum { * of the Matmul operator. */ OH_NN_MATMUL_ACTIVATION_TYPE = 35, - /** This enumerated value is used when the tensor is used as the kernel_size parameter + /** This enumerated value is used when the tensor is used as the kernelSize parameter * of the MaxPool operator. */ OH_NN_MAX_POOL_KERNEL_SIZE = 36, /** This enumerated value is used when the tensor is used as the stride parameter * of the MaxPool operator. */ OH_NN_MAX_POOL_STRIDE = 37, - /** This enumerated value is used when the tensor is used as the pad_mode parameter + /** This enumerated value is used when the tensor is used as the padMode parameter * of the MaxPool operator. */ OH_NN_MAX_POOL_PAD_MODE = 38, /** This enumerated value is used when the tensor is used as the pad parameter of the MaxPool operator. */ OH_NN_MAX_POOL_PAD = 39, - /** This enumerated value is used when the tensor is used as the activation_type parameter + /** This enumerated value is used when the tensor is used as the activationType parameter * of the MaxPool operator. */ OH_NN_MAX_POOL_ACTIVATION_TYPE = 40, @@ -2314,7 +2764,7 @@ typedef enum { /** This enumerated value is used when the tensor is used as the axis parameter of the OneHot operator. 
*/ OH_NN_ONE_HOT_AXIS = 42, - /** This enumerated value is used when the tensor is used as the constant_value parameter + /** This enumerated value is used when the tensor is used as the constantValue parameter * of the Pad operator. */ OH_NN_PAD_CONSTANT_VALUE = 43, @@ -2369,23 +2819,23 @@ typedef enum { * of the Sub operator. */ OH_NN_SUB_ACTIVATIONTYPE = 59, - /** This enumerated value is used when the tensor is used as the keep_dims parameter + /** This enumerated value is used when the tensor is used as the keepDims parameter * of the ReduceMean operator. */ OH_NN_REDUCE_MEAN_KEEP_DIMS = 60, - /** This enumerated value is used when the tensor is used as the new_height parameter + /** This enumerated value is used when the tensor is used as the newHeight parameter * of the ResizeBilinear operator. */ OH_NN_RESIZE_BILINEAR_NEW_HEIGHT = 61, - /** This enumerated value is used when the tensor is used as the new_width parameter + /** This enumerated value is used when the tensor is used as the newWidth parameter * of the ResizeBilinear operator. */ OH_NN_RESIZE_BILINEAR_NEW_WIDTH = 62, - /** This enumerated value is used when the tensor is used as the preserve_aspect_ratio parameter + /** This enumerated value is used when the tensor is used as the preserveAspectRatio parameter * of the ResizeBilinear operator. */ OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO = 63, - /** This enumerated value is used when the tensor is used as the coordinate_transform_mode parameter + /** This enumerated value is used when the tensor is used as the coordinateTransformMode parameter * of the ResizeBilinear operator. */ OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE = 64, - /** This enumerated value is used when the tensor is used as the exclude_outside parameter + /** This enumerated value is used when the tensor is used as the excludeOutside parameter * of the ResizeBilinear operator. 
*/ OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE = 65, @@ -2402,11 +2852,11 @@ typedef enum { * of the LayerNorm operator. */ OH_NN_LAYER_NORM_ELEMENTWISE_AFFINE = 69, - /** This enumerated value is used when the tensor is used as the keep_dims parameter + /** This enumerated value is used when the tensor is used as the keepDims parameter * of the ReduceProd operator. */ OH_NN_REDUCE_PROD_KEEP_DIMS = 70, - /** This enumerated value is used when the tensor is used as the keep_dims parameter + /** This enumerated value is used when the tensor is used as the keepDims parameter * of the ReduceAll operator. */ OH_NN_REDUCE_ALL_KEEP_DIMS = 71, @@ -2438,96 +2888,258 @@ typedef enum { /** This enumerated value is used when the tensor is used as the axis parameter of the Flatten operator. */ OH_NN_FLATTEN_AXIS = 79, - /** This enumerated value is used when the tensor is used as the block_Size parameter + /** This enumerated value is used when the tensor is used as the blockSize parameter * of the DepthToSpace operator. */ OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE = 80, - /** This enumerated value is used when the tensor is used as the format parameter - * of the DepthToSpace operator. */ - OH_NN_DEPTH_TO_SPACE_FORMAT = 81, /** This enumerated value is used when the tensor is used as the mode parameter * of the DepthToSpace operator. */ - OH_NN_DEPTH_TO_SPACE_MODE = 82, + OH_NN_DEPTH_TO_SPACE_MODE = 81, - /** This enumerated value is used when the tensor is used as the dType parameter of the Range operator. */ - OH_NN_RANGE_DTYPE = 83, /** This enumerated value is used when the tensor is used as the start parameter of the Range operator. */ - OH_NN_RANGE_START = 84, + OH_NN_RANGE_START = 82, /** This enumerated value is used when the tensor is used as the limit parameter of the Range operator. */ - OH_NN_RANGE_LIMIT = 85, + OH_NN_RANGE_LIMIT = 83, /** This enumerated value is used when the tensor is used as the delta parameter of the Range operator. 
*/ - OH_NN_RANGE_DELTA = 86, + OH_NN_RANGE_DELTA = 84, /** This enumerated value is used when the tensor is used as the dataType parameter * of the ConstantOfShape operator. */ - OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE = 87, + OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE = 85, /** This enumerated value is used when the tensor is used as the value parameter * of the ConstantOfShape operator. */ - OH_NN_CONSTANT_OF_SHAPE_VALUE = 88, + OH_NN_CONSTANT_OF_SHAPE_VALUE = 86, /** This enumerated value is used when the tensor is used as the shape parameter * of the BroadcastTo operator. */ - OH_NN_BROADCAST_TO_SHAPE = 89, + OH_NN_BROADCAST_TO_SHAPE = 87, /** This enumerated value is used when the tensor is used as the epsilon parameter * of the InstanceNorm operator. */ - OH_NN_INSTANCE_NORM_EPSILON = 90, + OH_NN_INSTANCE_NORM_EPSILON = 88, /** This enumerated value is used when the tensor is used as the base parameter of the Exp operator. */ - OH_NN_EXP_BASE = 91, + OH_NN_EXP_BASE = 89, /** This enumerated value is used when the tensor is used as the scale parameter of the Exp operator. */ - OH_NN_EXP_SCALE = 92, + OH_NN_EXP_SCALE = 90, /** This enumerated value is used when the tensor is used as the shift parameter of the Exp operator. */ - OH_NN_EXP_SHIFT = 93, + OH_NN_EXP_SHIFT = 91, - /** This enumerated value is used when the tensor is used as the negative_slope parameter + /** This enumerated value is used when the tensor is used as the negativeSlope parameter * of the LeakyRelu operator. */ - OH_NN_LEAKY_RELU_NEGATIVE_SLOPE = 94, + OH_NN_LEAKY_RELU_NEGATIVE_SLOPE = 92, /** This enumerated value is used when the tensor is used as the bidirectional parameter * of the LSTM operator. */ - OH_NN_LSTM_BIDIRECTIONAL = 95, - /** This enumerated value is used when the tensor is used as the has_bias parameter of the LSTM operator. 
*/ - OH_NN_LSTM_HAS_BIAS = 96, - /** This enumerated value is used when the tensor is used as the input_size parameter + OH_NN_LSTM_BIDIRECTIONAL = 93, + /** This enumerated value is used when the tensor is used as the hasBias parameter of the LSTM operator. */ + OH_NN_LSTM_HAS_BIAS = 94, + /** This enumerated value is used when the tensor is used as the inputSize parameter * of the LSTM operator. */ - OH_NN_LSTM_INPUT_SIZE = 97, - /** This enumerated value is used when the tensor is used as the hidden_size parameter + OH_NN_LSTM_INPUT_SIZE = 95, + /** This enumerated value is used when the tensor is used as the hiddenSize parameter * of the LSTM operator. */ - OH_NN_LSTM_HIDDEN_SIZE = 98, - /** This enumerated value is used when the tensor is used as the num_layers parameter + OH_NN_LSTM_HIDDEN_SIZE = 96, + /** This enumerated value is used when the tensor is used as the numLayers parameter * of the LSTM operator. */ - OH_NN_LSTM_NUM_LAYERS = 99, - /** This enumerated value is used when the tensor is used as the num_directions parameter + OH_NN_LSTM_NUM_LAYERS = 97, + /** This enumerated value is used when the tensor is used as the numDirections parameter * of the LSTM operator. */ - OH_NN_LSTM_NUM_DIRECTIONS = 100, + OH_NN_LSTM_NUM_DIRECTIONS = 98, /** This enumerated value is used when the tensor is used as the dropout parameter of the LSTM operator. */ - OH_NN_LSTM_DROPOUT = 101, - /** This enumerated value is used when the tensor is used as the zoneout_cell parameter + OH_NN_LSTM_DROPOUT = 99, + /** This enumerated value is used when the tensor is used as the zoneoutCell parameter * of the LSTM operator. */ - OH_NN_LSTM_ZONEOUT_CELL = 102, - /** This enumerated value is used when the tensor is used as the zoneout_hidden parameter + OH_NN_LSTM_ZONEOUT_CELL = 100, + /** This enumerated value is used when the tensor is used as the zoneoutHidden parameter * of the LSTM operator. 
*/ - OH_NN_LSTM_ZONEOUT_HIDDEN = 103, - /** This enumerated value is used when the tensor is used as the proj_size parameter + OH_NN_LSTM_ZONEOUT_HIDDEN = 101, + /** This enumerated value is used when the tensor is used as the projSize parameter * of the LSTM operator. */ - OH_NN_LSTM_PROJ_SIZE = 104, + OH_NN_LSTM_PROJ_SIZE = 102, /** This enumerated value is used when the tensor is used as the max parameter of the Clip operator. */ - OH_NN_CLIP_MAX = 105, + OH_NN_CLIP_MAX = 103, /** This enumerated value is used when the tensor is used as the min parameter of the Clip operator. */ - OH_NN_CLIP_MIN = 106, + OH_NN_CLIP_MIN = 104, - /** This enumerated value is used when the tensor is used as the keep_dims parameter of the All operator. */ - OH_NN_ALL_KEEP_DIMS = 107, + /** This enumerated value is used when the tensor is used as the keepDims parameter of the All operator. */ + OH_NN_ALL_KEEP_DIMS = 105, /** This enumerated value is used when the tensor is used as the summarize parameter * of the Assert operator. */ - OH_NN_ASSERT_SUMMARIZE = 108, + OH_NN_ASSERT_SUMMARIZE = 106, /** This enumerated value is used when the tensor is used as the scale parameter of the pow operator. */ - OH_NN_POW_SCALE = 109, + OH_NN_POW_SCALE = 107, /** This enumerated value is used when the tensor is used as the shift parameter of the pow operator. */ - OH_NN_POW_SHIFT = 110, + OH_NN_POW_SHIFT = 108, + + /** This enumerated value is used when the tensor is used as the roundMode parameter + * of the AvgPool operator. */ + OH_NN_AVG_POOL_ROUND_MODE = 109, + /** This enumerated value is used when the tensor is used as the global parameter + * of the AvgPool operator. */ + OH_NN_AVG_POOL_GLOBAL = 110, + + /** This enumerated value is used when the tensor is used as the hasBias parameter + * of the FullConnection operator. */ + OH_NN_FULL_CONNECTION_HAS_BIAS = 111, + /** This enumerated value is used when the tensor is used as the useAxis parameter + * of the FullConnection operator. 
*/ + OH_NN_FULL_CONNECTION_USE_AXIS = 112, + + /** This enumerated value is used when the tensor is used as the approximate parameter + * of the GeLU operator. */ + OH_NN_GELU_APPROXIMATE = 113, + + /** This enumerated value is used when the tensor is used as the roundMode parameter + * of the MaxPool operator. */ + OH_NN_MAX_POOL_ROUND_MODE = 114, + /** This enumerated value is used when the tensor is used as the global parameter + * of the MaxPool operator. */ + OH_NN_MAX_POOL_GLOBAL = 115, + + /** This enumerated value is used when the tensor is used as the paddingMode parameter + * of the Pad operator. */ + OH_NN_PAD_PADDING_MODE = 116, + + /** This enumerated value is used when the tensor is used as the reduceToEnd parameter + * of the ReduceMean operator. */ + OH_NN_REDUCE_MEAN_REDUCE_TO_END = 117, + /** This enumerated value is used when the tensor is used as the coeff parameter + * of the ReduceMean operator. */ + OH_NN_REDUCE_MEAN_COEFF = 118, + + /** This enumerated value is used when the tensor is used as the reduceToEnd parameter + * of the ReduceProd operator. */ + OH_NN_REDUCE_PROD_REDUCE_TO_END = 119, + /** This enumerated value is used when the tensor is used as the coeff parameter + * of the ReduceProd operator. */ + OH_NN_REDUCE_PROD_COEFF = 120, + + /** This enumerated value is used when the tensor is used as the reduceToEnd parameter + * of the ReduceAll operator. */ + OH_NN_REDUCE_ALL_REDUCE_TO_END = 121, + /** This enumerated value is used when the tensor is used as the coeff parameter + * of the ReduceAll operator. */ + OH_NN_REDUCE_ALL_COEFF = 122, + + /** This enumerated value is used when the tensor is used as the axis parameter + * of the Topk operator. */ + OH_NN_TOP_K_AXIS = 123, + + /** This enumerated value is used when the tensor is used as the topK parameter + * of the ArgMax operator. */ + OH_NN_ARG_MAX_TOP_K = 124, + /** This enumerated value is used when the tensor is used as the outMaxValue parameter + * of the ArgMax operator. 
*/ + OH_NN_ARG_MAX_OUT_MAX_VALUE = 125, + + /** This enumerated value is used when the tensor is used as the axis parameter of the crop operator. */ + OH_NN_CROP_AXIS = 126, + /** This enumerated value is used when the tensor is used as the offset parameter of the crop operator. */ + OH_NN_CROP_OFFSET = 127, + + /** This enumerated value is used when the tensor is used as the inputSize parameter + * of the detectionPostProcess operator. */ + OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE = 128, + /** This enumerated value is used when the tensor is used as the scale parameter + * of the detectionPostProcess operator. */ + OH_NN_DETECTION_POST_PROCESS_SCALE = 129, + /** This enumerated value is used when the tensor is used as the nmsIoUThreshold + * parameter of the detectionPostProcess operator. */ + OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD = 130, + /** This enumerated value is used when the tensor is used as the nmsScoreThreshold parameter + * of the detectionPostProcess operator. */ + OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD = 131, + /** This enumerated value is used when the tensor is used as the maxDetections parameter + * of the detectionPostProcess operator. */ + OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS = 132, + /** This enumerated value is used when the tensor is used as the detectionsPerClass parameter + * of the detectionPostProcess operator. */ + OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS = 133, + /** This enumerated value is used when the tensor is used as the maxClassesPerDetection parameter + * of the detectionPostProcess operator. */ + OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION = 134, + /** This enumerated value is used when the tensor is used as the numClasses parameter + * of the detectionPostProcess operator. */ + OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES = 135, + /** This enumerated value is used when the tensor is used as the useRegularNms parameter + * of the detectionPostProcess operator. 
*/ + OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS = 136, + /** This enumerated value is used when the tensor is used as the outQuantized parameter + * of the detectionPostProcess operator. */ + OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED = 137, + + /** This enumerated value is used when the tensor is used as the axis parameter + * of the L2Normalize operator. */ + OH_NN_L2_NORMALIZE_AXIS = 138, + /** This enumerated value is used when the tensor is used as the epsilon parameter + * of the L2Normalize operator. */ + OH_NN_L2_NORMALIZE_EPSILON = 139, + /** This enumerated value is used when the tensor is used as the activationType parameter + * of the L2Normalize operator. */ + OH_NN_L2_NORMALIZE_ACTIVATION_TYPE = 140, + + /** This enumerated value is used when the tensor is used as the axis parameter of the LogSoftmax operator. */ + OH_NN_LOG_SOFTMAX_AXIS = 141, + + /** This enumerated value is used when the tensor is used as the depthRadius + * parameter of the LRN operator. */ + OH_NN_LRN_DEPTH_RADIUS = 142, + /** This enumerated value is used when the tensor is used as the bias parameter of the LRN operator. */ + OH_NN_LRN_BIAS = 143, + /** This enumerated value is used when the tensor is used as the alpha parameter of the LRN operator. */ + OH_NN_LRN_ALPHA = 144, + /** This enumerated value is used when the tensor is used as the beta parameter of the LRN operator. */ + OH_NN_LRN_BETA = 145, + /** This enumerated value is used when the tensor is used as the normRegion parameter + * of the LRN operator. */ + OH_NN_LRN_NORM_REGION = 146, + + /** This enumerated value is used when the tensor is used as the blockSize parameter + * of the spaceToDepth operator. */ + OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE = 147, + + /** This enumerated value is used when the tensor is used as the keepDims parameter + * of the ReduceMax operator. 
*/ + OH_NN_REDUCE_MAX_KEEP_DIMS = 148, + /** This enumerated value is used when the tensor is used as the reduceToEnd parameter + * of the ReduceMax operator. */ + OH_NN_REDUCE_MAX_REDUCE_TO_END = 149, + /** This enumerated value is used when the tensor is used as the coeff parameter + * of the ReduceMax operator. */ + OH_NN_REDUCE_MAX_COEFF = 150, + + /** This enumerated value is used when the tensor is used as the keepDims parameter + * of the ReduceMin operator. */ + OH_NN_REDUCE_MIN_KEEP_DIMS = 151, + /** This enumerated value is used when the tensor is used as the reduceToEnd parameter + * of the ReduceMin operator. */ + OH_NN_REDUCE_MIN_REDUCE_TO_END = 152, + /** This enumerated value is used when the tensor is used as the coeff parameter + * of the ReduceMin operator. */ + OH_NN_REDUCE_MIN_COEFF = 153, + + /** This enumerated value is used when the tensor is used as the keepDims parameter + * of the ReduceSum operator. */ + OH_NN_REDUCE_SUM_KEEP_DIMS = 154, + /** This enumerated value is used when the tensor is used as the reduceToEnd parameter + * of the ReduceSum operator. */ + OH_NN_REDUCE_SUM_REDUCE_TO_END = 155, + /** This enumerated value is used when the tensor is used as the coeff parameter + * of the ReduceSum operator. */ + OH_NN_REDUCE_SUM_COEFF = 156, + + /** This enumerated value is used when the tensor is used as the axis parameter + * of the QuantDTypeCast operator. */ + OH_NN_QUANT_DTYPE_CAST_AXIS = 157, + + /** This enumerated value is used when the tensor is used as the axes parameter of the Slice operator. */ + OH_NN_SLICE_AXES = 158, + + /** This enumerated value is used when the tensor is used as the dims parameter of the Tile operator. 
*/ + OH_NN_TILE_DIMS = 159, } OH_NN_TensorType; /** diff --git a/test/unittest/ops/BUILD.gn b/test/unittest/ops/BUILD.gn index 44eb8c9368f55481cefc8937a39ab0824276fd77..519a0c45f94b399574368461caa9371c942462ad 100644 --- a/test/unittest/ops/BUILD.gn +++ b/test/unittest/ops/BUILD.gn @@ -43,6 +43,7 @@ ohos_unittest("OpsUnittest") { sources += [ "./biasadd_test.cpp" ] sources += [ "./broadcast_to_test.cpp" ] sources += [ "./cast_test.cpp" ] + sources += [ "./ceil_test.cpp" ] sources += [ "./clip_test.cpp" ] sources += [ "./concat_three_inputs_test.cpp" ] sources += [ "./concat_two_inputs_test.cpp" ] @@ -52,9 +53,11 @@ ohos_unittest("OpsUnittest") { sources += [ "./conv2d_tranpose_padmode_test.cpp" ] sources += [ "./conv2d_transpose_pad_test.cpp" ] sources += [ "./cos_test.cpp" ] + sources += [ "./crop_test.cpp" ] sources += [ "./depth_to_space_test.cpp" ] sources += [ "./depthwise_conv2d_native_pad_test.cpp" ] sources += [ "./depthwise_conv2d_native_padmode_test.cpp" ] + sources += [ "./detection_post_process_test.cpp" ] sources += [ "./div_test.cpp" ] sources += [ "./eltwise_test.cpp" ] sources += [ "./equal_builder_test.cpp" ] @@ -65,25 +68,30 @@ ohos_unittest("OpsUnittest") { sources += [ "./fullconnection_with_axis_test.cpp" ] sources += [ "./fill_builder_test.cpp" ] sources += [ "./flatten_test.cpp" ] + sources += [ "./floor_test.cpp" ] sources += [ "./gather_builder_test.cpp" ] sources += [ "./gelu_builder_test.cpp" ] sources += [ "./greater_builder_test.cpp" ] sources += [ "./greater_equal_builder_test.cpp" ] sources += [ "./hswish_builder_test.cpp" ] sources += [ "./instance_norm_test.cpp" ] + sources += [ "./l2_normalize_test.cpp" ] sources += [ "./layernorm_builder_test.cpp" ] sources += [ "./leaky_relu_test.cpp" ] sources += [ "./less_test.cpp" ] sources += [ "./lessequal_builder_test.cpp" ] + sources += [ "./log_softmax_test.cpp" ] sources += [ "./log_test.cpp" ] sources += [ "./logical_and_test.cpp" ] sources += [ "./logical_not_test.cpp" ] sources += [ 
"./logical_or_test.cpp" ] + sources += [ "./lrn_test.cpp" ] sources += [ "./lstm_test.cpp" ] sources += [ "./maximum_builder_test.cpp" ] sources += [ "./maxpool_pad_test.cpp" ] sources += [ "./maxpool_padmode_test.cpp" ] sources += [ "./matmul_builder_test.cpp" ] + sources += [ "./minimum_test.cpp" ] sources += [ "./mod_test.cpp" ] sources += [ "./mul_builder_test.cpp" ] sources += [ "./neg_test.cpp" ] @@ -94,22 +102,28 @@ ohos_unittest("OpsUnittest") { sources += [ "./prelu_builder_test.cpp" ] sources += [ "./quant_dtype_cast_builder_test.cpp" ] sources += [ "./range_test.cpp" ] + sources += [ "./rank_test.cpp" ] sources += [ "./reciprocal_test.cpp" ] sources += [ "./reduce_all_builder_test.cpp" ] + sources += [ "./reduce_max_builder_test.cpp" ] sources += [ "./reduce_mean_builder_test.cpp" ] + sources += [ "./reduce_min_builder_test.cpp" ] sources += [ "./reduce_prod_builder_test.cpp" ] + sources += [ "./reduce_sum_builder_test.cpp" ] sources += [ "./relu_builder_test.cpp" ] sources += [ "./relu6_builder_test.cpp" ] sources += [ "./reshape_builder_test.cpp" ] sources += [ "./resize_bilinear_builder_test.cpp" ] sources += [ "./rsqrt_builder_test.cpp" ] sources += [ "./scale_builder_test.cpp" ] + sources += [ "./scatter_nd_test.cpp" ] sources += [ "./select_test.cpp" ] sources += [ "./shape_builder_test.cpp" ] sources += [ "./sigmoid_builder_test.cpp" ] sources += [ "./sin_test.cpp" ] sources += [ "./slice_builder_test.cpp" ] sources += [ "./softmax_builder_test.cpp" ] + sources += [ "./space_to_depth_test.cpp" ] sources += [ "./spacetobatchnd_builder_test.cpp" ] sources += [ "./sparse_to_dense_test.cpp" ] sources += [ "./split_builder_test.cpp" ] @@ -120,6 +134,7 @@ ohos_unittest("OpsUnittest") { sources += [ "./stack_builder_test.cpp" ] sources += [ "./strided_slice_builder_test.cpp" ] sources += [ "./sub_builder_test.cpp" ] + sources += [ "./swish_test.cpp" ] sources += [ "./tanh_builder_test.cpp" ] sources += [ "./tile_builder_test.cpp" ] sources += [ 
"./topk_builder_test.cpp" ] diff --git a/test/unittest/ops/argmax_test.cpp b/test/unittest/ops/argmax_test.cpp index e658bf777fa075f7a5bfc444fa4dcbbb527908bd..06d56320dd4e980be3a080dee50bb8653830471e 100644 --- a/test/unittest/ops/argmax_test.cpp +++ b/test/unittest/ops/argmax_test.cpp @@ -33,12 +33,16 @@ public: const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); void SetArgmaxKeepdims(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetArgmaxTopK(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetArgmaxOutMaxValue(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); public: ArgMaxBuilder m_builder; std::vector m_inputs{0}; std::vector m_outputs{1}; - std::vector m_params{2, 3}; + std::vector m_params{2, 3, 4, 5}; std::vector m_input_dim{3, 3}; std::vector m_output_dim{3, 3}; std::vector m_param_dim{}; @@ -64,7 +68,27 @@ void ArgMaxBuilderTest::SetArgmaxKeepdims(OH_NN_DataType dataType, std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); bool* keepdimsValue = new (std::nothrow) bool(false); EXPECT_NE(nullptr, keepdimsValue); - tensor->SetBuffer(keepdimsValue, sizeof(keepdimsValue)); + tensor->SetBuffer(keepdimsValue, sizeof(bool)); + m_allTensors.emplace_back(tensor); +} + +void ArgMaxBuilderTest::SetArgmaxTopK(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* topKValue = new (std::nothrow) int64_t(0); + EXPECT_NE(nullptr, topKValue); + tensor->SetBuffer(topKValue, sizeof(int64_t)); + m_allTensors.emplace_back(tensor); +} + +void ArgMaxBuilderTest::SetArgmaxOutMaxValue(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType 
type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + bool* outMaxValueValue = new (std::nothrow) bool(false); + EXPECT_NE(nullptr, outMaxValueValue); + tensor->SetBuffer(outMaxValueValue, sizeof(bool)); m_allTensors.emplace_back(tensor); } @@ -81,6 +105,8 @@ HWTEST_F(ArgMaxBuilderTest, argmax_build_001, TestSize.Level1) SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -93,7 +119,7 @@ HWTEST_F(ArgMaxBuilderTest, argmax_build_002, TestSize.Level1) { m_inputs = {0}; m_outputs = {1}; - m_params = {2, 3}; + m_params = {2, 3, 4, 5}; m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -101,6 +127,9 @@ HWTEST_F(ArgMaxBuilderTest, argmax_build_002, TestSize.Level1) SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -122,6 +151,9 @@ HWTEST_F(ArgMaxBuilderTest, argmax_build_003, TestSize.Level1) SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, 
OH_NN_ARG_MAX_OUT_MAX_VALUE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -142,6 +174,9 @@ HWTEST_F(ArgMaxBuilderTest, argmax_build_004, TestSize.Level1) SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -162,6 +197,9 @@ HWTEST_F(ArgMaxBuilderTest, argmax_build_005, TestSize.Level1) SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -182,6 +220,9 @@ HWTEST_F(ArgMaxBuilderTest, argmax_build_006, TestSize.Level1) SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -203,6 +244,9 @@ HWTEST_F(ArgMaxBuilderTest, argmax_build_007, TestSize.Level1) tensor->SetBuffer(axisValueTest, sizeof(float)); m_allTensors.emplace_back(tensor); SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + 
SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -224,12 +268,15 @@ HWTEST_F(ArgMaxBuilderTest, argmax_build_008, TestSize.Level1) tensor->SetBuffer(keepdimsValue, sizeof(int64_t)); m_allTensors.emplace_back(tensor); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } /** * @tc.name: argmax_build_009 - * @tc.desc: Verify the invalid param to argmax of the build function + * @tc.desc: Verify the invalid keepdims of the build function * @tc.type: FUNC */ HWTEST_F(ArgMaxBuilderTest, argmax_build_009, TestSize.Level1) @@ -239,18 +286,21 @@ HWTEST_F(ArgMaxBuilderTest, argmax_build_009, TestSize.Level1) SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); - std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_AVG_POOL_STRIDE); - int64_t* strideValue = new (std::nothrow) int64_t(0); - EXPECT_NE(nullptr, strideValue); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); - tensor->SetBuffer(strideValue, sizeof(int64_t)); + std::shared_ptr tensor = TransToNNTensor(OH_NN_FLOAT32, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + float* topKValueTest = new (std::nothrow) float(0); + EXPECT_NE(nullptr, topKValueTest); + tensor->SetBuffer(topKValueTest, sizeof(float)); m_allTensors.emplace_back(tensor); + SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } /** * @tc.name: argmax_build_010 - * @tc.desc: Verify the argmax 
without set axis of the build function + * @tc.desc: Verify the invalid keepdims of the build function * @tc.type: FUNC */ HWTEST_F(ArgMaxBuilderTest, argmax_build_010, TestSize.Level1) @@ -259,18 +309,121 @@ HWTEST_F(ArgMaxBuilderTest, argmax_build_010, TestSize.Level1) SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); + int64_t* outMaxValue = new (std::nothrow) int64_t(0); + EXPECT_NE(nullptr, outMaxValue); + tensor->SetBuffer(outMaxValue, sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: argmax_build_011 + * @tc.desc: Verify the invalid param to argmax's axis of the build function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, argmax_build_011, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: argmax_build_012 + * @tc.desc: Verify the invalid param to argmax's keepDims of the build function + * @tc.type: FUNC + */ 
+HWTEST_F(ArgMaxBuilderTest, argmax_build_012, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: argmax_build_013 + * @tc.desc: Verify the invalid param to argmax's topK of the build function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, argmax_build_013, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: argmax_build_014 + * @tc.desc: Verify the invalid param to argmax's outMaxValue of the build function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, argmax_build_014, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, 
OH_NN_ARG_MAX_KEEPDIMS); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: argmax_build_015 + * @tc.desc: Verify the argmax without set axis of the build function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, argmax_build_015, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); m_allTensors.emplace_back(tensor); SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } /** - * @tc.name: argmax_build_011 + * @tc.name: argmax_build_016 * @tc.desc: Verify the argmax without set keepdims of the build function * @tc.type: FUNC */ -HWTEST_F(ArgMaxBuilderTest, argmax_build_011, TestSize.Level1) +HWTEST_F(ArgMaxBuilderTest, argmax_build_016, TestSize.Level1) { m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -279,15 +432,58 @@ HWTEST_F(ArgMaxBuilderTest, argmax_build_011, TestSize.Level1) SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); std::shared_ptr tensor = TransToNNTensor(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); m_allTensors.emplace_back(tensor); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); + + 
EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: argmax_build_017 + * @tc.desc: Verify the argmax without set topK of the build function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, argmax_build_017, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + m_allTensors.emplace_back(tensor); + SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: argmax_build_018 + * @tc.desc: Verify the argmax without set outMaxValue of the build function + * @tc.type: FUNC + */ +HWTEST_F(ArgMaxBuilderTest, argmax_build_018, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); + SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + std::shared_ptr tensor = TransToNNTensor(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } /** - * @tc.name: add_getprimitive_001 + * @tc.name: argmax_getprimitive_001 * @tc.desc: Verify the behavior of the GetPrimitive function * @tc.type: FUNC */ 
-HWTEST_F(ArgMaxBuilderTest, add_getprimitive_001, TestSize.Level1) +HWTEST_F(ArgMaxBuilderTest, argmax_getprimitive_001, TestSize.Level1) { m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -295,6 +491,8 @@ HWTEST_F(ArgMaxBuilderTest, add_getprimitive_001, TestSize.Level1) SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); @@ -302,18 +500,22 @@ HWTEST_F(ArgMaxBuilderTest, add_getprimitive_001, TestSize.Level1) EXPECT_NE(expectPrimitive, primitive); EXPECT_NE(nullptr, primitive); - int64_t returnValue = mindspore::lite::MindIR_ArgMaxFusion_GetAxis(primitive.get()); - EXPECT_EQ(returnValue, 0); + int64_t axisReturn = mindspore::lite::MindIR_ArgMaxFusion_GetAxis(primitive.get()); + EXPECT_EQ(axisReturn, 0); bool keepdimsReturn = mindspore::lite::MindIR_ArgMaxFusion_GetKeepDims(primitive.get()); EXPECT_EQ(keepdimsReturn, false); + int64_t topKReturn = mindspore::lite::MindIR_ArgMaxFusion_GetTopK(primitive.get()); + EXPECT_EQ(topKReturn, 0); + bool outMaxValueReturn = mindspore::lite::MindIR_ArgMaxFusion_GetOutMaxValue(primitive.get()); + EXPECT_EQ(outMaxValueReturn, false); } /** - * @tc.name: add_getprimitive_002 + * @tc.name: argmax_getprimitive_002 * @tc.desc: Verify the behavior of the GetPrimitive function * @tc.type: FUNC */ -HWTEST_F(ArgMaxBuilderTest, add_getprimitive_002, TestSize.Level1) +HWTEST_F(ArgMaxBuilderTest, argmax_getprimitive_002, TestSize.Level1) { m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -321,6 +523,8 @@ HWTEST_F(ArgMaxBuilderTest, 
add_getprimitive_002, TestSize.Level1) SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS); SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + SetArgmaxTopK(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_TOP_K); + SetArgmaxOutMaxValue(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_OUT_MAX_VALUE); LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; EXPECT_EQ(expectPrimitive, primitive); diff --git a/test/unittest/ops/avgpool_pad_test.cpp b/test/unittest/ops/avgpool_pad_test.cpp index 15d616396fec0604a669db56dd8c591abcda522c..ab5f74bff4d53e54de2ef7b10d54ae5df4335989 100644 --- a/test/unittest/ops/avgpool_pad_test.cpp +++ b/test/unittest/ops/avgpool_pad_test.cpp @@ -31,6 +31,10 @@ public: void SetPad(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetRoundMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetGlobal(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); void SetPadParams(); public: @@ -43,19 +47,39 @@ public: std::vector m_param_dim{}; std::vector m_inputs{0}; std::vector m_outputs{1}; - std::vector m_params{2, 3, 4, 5}; + std::vector m_params{2, 3, 4, 5, 6, 7}; }; void AvgPoolPadBuilderTest::SetUp() {} void AvgPoolPadBuilderTest::TearDown() {} +void AvgPoolPadBuilderTest::SetRoundMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int32_t* roundModeValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, roundModeValue); + tensor->SetBuffer(roundModeValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); +} + +void AvgPoolPadBuilderTest::SetGlobal(OH_NN_DataType dataType, + const 
std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + bool* globalValue = new (std::nothrow) bool(false); + EXPECT_NE(nullptr, globalValue); + tensor->SetBuffer(globalValue, sizeof(bool)); + m_allTensors.emplace_back(tensor); +} + void AvgPoolPadBuilderTest::SetPad(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) { int32_t padNum{4}; std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); - int64_t* padValue = new (std::nothrow) int64_t[padNum]{0, 0, 0, 0}; + int64_t* padValue = new (std::nothrow) int64_t[padNum] {0, 0, 0, 0}; EXPECT_NE(nullptr, padValue); tensor->SetBuffer(padValue, sizeof(int64_t) * padNum); m_allTensors.emplace_back(tensor); @@ -67,6 +91,8 @@ void AvgPoolPadBuilderTest::SetPadParams() SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL); } /** @@ -107,7 +133,7 @@ HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_003, TestSize.Level1) { m_inputs = {}; m_outputs = {0}; - m_params = {1, 2, 3, 4}; + m_params = {1, 2, 3, 4, 5, 6}; m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -126,7 +152,7 @@ HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_004, TestSize.Level1) { m_inputs = {0}; m_outputs = {}; - m_params = {1, 2, 3, 4}; + m_params = {1, 2, 3, 4, 5, 6}; m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -143,9 +169,9 @@ HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_004, TestSize.Level1) */ HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_005, TestSize.Level1) { - m_inputs = 
{6}; + m_inputs = {8}; m_outputs = {1}; - m_params = {2, 3, 4, 5}; + m_params = {2, 3, 4, 5, 6, 7}; m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -163,8 +189,8 @@ HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_005, TestSize.Level1) HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_006, TestSize.Level1) { m_inputs = {0}; - m_outputs = {6}; - m_params = {2, 3, 4, 5}; + m_outputs = {8}; + m_params = {2, 3, 4, 5, 6, 7}; m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -196,7 +222,10 @@ HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_007, TestSize.Level1) SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + tensor->SetBuffer(nullptr, 0); } /** @@ -220,7 +249,10 @@ HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_008, TestSize.Level1) SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + tensor->SetBuffer(nullptr, 0); } /** @@ -244,10 +276,12 @@ HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_009, TestSize.Level1) tensor->SetBuffer(padValue, sizeof(int32_t) * padNum); m_allTensors.emplace_back(tensor); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, 
m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + tensor->SetBuffer(nullptr, 0); } - /** * @tc.name: avgpool_build_pad_010 * @tc.desc: Verify the invalid activation of the build function @@ -269,15 +303,72 @@ HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_010, TestSize.Level1) tensor->SetBuffer(activationValue, sizeof(int32_t)); m_allTensors.emplace_back(tensor); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + tensor->SetBuffer(nullptr, 0); } /** * @tc.name: avgpool_build_pad_011 - * @tc.desc: Verify the activation scalar length of the build function + * @tc.desc: Verify the invalid roundMode of the build function * @tc.type: FUNC */ HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_011, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, + OH_NN_AVG_POOL_ROUND_MODE); + int64_t* roundModeValue = new (std::nothrow) int64_t(0); + EXPECT_NE(nullptr, roundModeValue); + + tensor->SetBuffer(roundModeValue, sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + tensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: avgpool_build_pad_012 + * @tc.desc: Verify the invalid global of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_012, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_AVG_POOL_GLOBAL); + int32_t* globalValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, globalValue); + + tensor->SetBuffer(globalValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + tensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: avgpool_build_pad_013 + * @tc.desc: Verify the activation scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_013, TestSize.Level1) { m_param_dim = {2}; m_paramsIndex = m_params; @@ -298,11 +389,11 @@ HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_011, TestSize.Level1) } /** - * @tc.name: avgpool_build_pad_012 + * @tc.name: avgpool_build_pad_014 * @tc.desc: Verify the avgpool without set kernelsize of the build function * @tc.type: FUNC */ -HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_012, TestSize.Level1) +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_014, TestSize.Level1) { m_paramsIndex = 
m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -315,15 +406,17 @@ HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_012, TestSize.Level1) SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } /** - * @tc.name: avgpool_build_pad_013 + * @tc.name: avgpool_build_pad_015 * @tc.desc: Verify the avgpool without set stride of the build function * @tc.type: FUNC */ -HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_013, TestSize.Level1) +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_015, TestSize.Level1) { m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -335,15 +428,17 @@ HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_013, TestSize.Level1) SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } /** - * @tc.name: avgpool_build_pad_014 + * @tc.name: avgpool_build_pad_016 * @tc.desc: Verify the avgpool without set pad of the build function * @tc.type: FUNC */ -HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_014, TestSize.Level1) +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_016, TestSize.Level1) { m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -351,19 +446,21 @@ 
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_014, TestSize.Level1) SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); - std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); m_allTensors.emplace_back(tensor); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } /** - * @tc.name: avgpool_build_pad_015 + * @tc.name: avgpool_build_pad_017 * @tc.desc: Verify the avgpool without set activation of the build function * @tc.type: FUNC */ -HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_015, TestSize.Level1) +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_017, TestSize.Level1) { m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -376,6 +473,54 @@ HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_015, TestSize.Level1) std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); m_allTensors.emplace_back(tensor); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_018 + * @tc.desc: Verify the avgpool without set roundMode of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_018, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, 
OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, + OH_NN_AVG_POOL_ROUND_MODE); + m_allTensors.emplace_back(tensor); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_019 + * @tc.desc: Verify the avgpool without set global of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_019, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_AVG_POOL_GLOBAL); + m_allTensors.emplace_back(tensor); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -408,6 +553,12 @@ HWTEST_F(AvgPoolPadBuilderTest, avgpool_getprimitive_pad_001, TestSize.Level1) int8_t activationValue = 0; int expectActivation = mindspore::lite::MindIR_AvgPoolFusion_GetActivationType(primitive.get()); 
EXPECT_EQ(activationValue, expectActivation); + mindspore::lite::RoundMode roundModeValue = mindspore::lite::ROUND_MODE_FLOOR; + auto expectRoundMode = mindspore::lite::MindIR_AvgPoolFusion_GetRoundMode(primitive.get()); + EXPECT_EQ(roundModeValue, expectRoundMode); + bool globalValue = false; + bool expectGlobal = mindspore::lite::MindIR_AvgPoolFusion_GetGlobal(primitive.get()); + EXPECT_EQ(globalValue, expectGlobal); } /** diff --git a/test/unittest/ops/avgpool_padmod_test.cpp b/test/unittest/ops/avgpool_padmod_test.cpp index 18b3ebd996437d3ec99bbf0ca01d7fb3861f4aff..c16d2de3fe7e04494b9409fdfecf89c31ac54645 100644 --- a/test/unittest/ops/avgpool_padmod_test.cpp +++ b/test/unittest/ops/avgpool_padmod_test.cpp @@ -31,13 +31,17 @@ public: void SetPadMode(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetRoundMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetGlobal(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); void SetParams(); public: AvgPoolBuilder m_builder; std::vector m_inputs{0}; std::vector m_outputs{1}; - std::vector m_params{2, 3, 4, 5}; + std::vector m_params{2, 3, 4, 5, 6, 7}; std::vector m_input_dim{1, 3, 3, 1}; std::vector m_output_dim{1, 2, 2, 1}; std::vector m_kenelsize_dim{2}; @@ -49,6 +53,26 @@ void AvgPoolBuilderTest::SetUp() {} void AvgPoolBuilderTest::TearDown() {} +void AvgPoolBuilderTest::SetRoundMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int32_t* roundModeValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, roundModeValue); + tensor->SetBuffer(roundModeValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); +} + +void AvgPoolBuilderTest::SetGlobal(OH_NN_DataType dataType, + 
const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + bool* globalValue = new (std::nothrow) bool(false); + EXPECT_NE(nullptr, globalValue); + tensor->SetBuffer(globalValue, sizeof(bool)); + m_allTensors.emplace_back(tensor); +} + void AvgPoolBuilderTest::SetPadMode(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) { @@ -65,6 +89,8 @@ void AvgPoolBuilderTest::SetParams() SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL); } /** @@ -107,7 +133,7 @@ HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_003, TestSize.Level1) { m_inputs = {}; m_outputs = {0}; - m_params = {1, 2, 3, 4}; + m_params = {1, 2, 3, 4, 5, 6}; m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -126,7 +152,7 @@ HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_004, TestSize.Level1) { m_inputs = {0}; m_outputs = {}; - m_params = {1, 2, 3, 4}; + m_params = {1, 2, 3, 4, 5, 6}; m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -143,9 +169,9 @@ HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_004, TestSize.Level1) */ HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_005, TestSize.Level1) { - m_inputs = {6}; + m_inputs = {8}; m_outputs = {1}; - m_params = {2, 3, 4, 5}; + m_params = {2, 3, 4, 5, 6, 7}; m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -163,8 +189,8 @@ HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_005, TestSize.Level1) HWTEST_F(AvgPoolBuilderTest, 
avgpool_build_pad_mode_006, TestSize.Level1) { m_inputs = {0}; - m_outputs = {6}; - m_params = {2, 3, 4, 5}; + m_outputs = {8}; + m_params = {2, 3, 4, 5, 6, 7}; m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -196,6 +222,8 @@ HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_007, TestSize.Level1) SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -221,6 +249,8 @@ HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_008, TestSize.Level1) SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -245,6 +275,8 @@ HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_009, TestSize.Level1) m_allTensors.emplace_back(tensor); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -270,15 +302,71 @@ HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_010, TestSize.Level1) tensor->SetBuffer(activationValue, sizeof(int32_t)); m_allTensors.emplace_back(tensor); + 
SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } /** * @tc.name: avgpool_build_pad_mode_011 - * @tc.desc: Verify the scalar length of the build function + * @tc.desc: Verify the invalid roundMode of the build function * @tc.type: FUNC */ HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_011, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, + OH_NN_AVG_POOL_ROUND_MODE); + int64_t* roundModeValue = new (std::nothrow) int64_t(0); + EXPECT_NE(nullptr, roundModeValue); + + tensor->SetBuffer(roundModeValue, sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + tensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: avgpool_build_pad_mode_012 + * @tc.desc: Verify the invalid global of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_012, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + 
SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_AVG_POOL_GLOBAL); + int32_t* globalValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, globalValue); + + tensor->SetBuffer(globalValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + tensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: avgpool_build_pad_mode_013 + * @tc.desc: Verify the scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_013, TestSize.Level1) { m_param_dim = {2}; m_paramsIndex = m_params; @@ -299,11 +387,11 @@ HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_011, TestSize.Level1) } /** - * @tc.name: avgpool_build_pad_mode_012 + * @tc.name: avgpool_build_pad_mode_014 * @tc.desc: Verify the param invalid to avgpool of the build function * @tc.type: FUNC */ -HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_012, TestSize.Level1) +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_014, TestSize.Level1) { m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -323,11 +411,11 @@ HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_012, TestSize.Level1) } /** - * @tc.name: avgpool_build_pad_mode_013 + * @tc.name: avgpool_build_pad_mode_015 * @tc.desc: Verify the invalid padmode of the build function * @tc.type: FUNC */ -HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_013, TestSize.Level1) +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_015, TestSize.Level1) { m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, 
m_input_dim, nullptr); @@ -346,11 +434,11 @@ HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_013, TestSize.Level1) } /** - * @tc.name: avgpool_build_pad_mode_014 + * @tc.name: avgpool_build_pad_mode_016 * @tc.desc: Verify the invalid activation value of the build function * @tc.type: FUNC */ -HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_014, TestSize.Level1) +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_016, TestSize.Level1) { m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -398,6 +486,13 @@ HWTEST_F(AvgPoolBuilderTest, avgpool_getprimitive_pad_mode_001, TestSize.Level1) EXPECT_EQ(1, returnPadMode); int returnActivation = mindspore::lite::MindIR_AvgPoolFusion_GetActivationType(primitive.get()); EXPECT_EQ(0, returnActivation); + + mindspore::lite::RoundMode roundModeValue = mindspore::lite::ROUND_MODE_FLOOR; + auto expectRoundMode = mindspore::lite::MindIR_AvgPoolFusion_GetRoundMode(primitive.get()); + EXPECT_EQ(roundModeValue, expectRoundMode); + bool globalValue = false; + bool expectGlobal = mindspore::lite::MindIR_AvgPoolFusion_GetGlobal(primitive.get()); + EXPECT_EQ(globalValue, expectGlobal); } /** diff --git a/test/unittest/ops/ceil_test.cpp b/test/unittest/ops/ceil_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..edff2671ace99de8596e7c8ad4833f08f908e356 --- /dev/null +++ b/test/unittest/ops/ceil_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/ceil_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class CeilBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + CeilBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void CeilBuilderTest::SetUp() {} + +void CeilBuilderTest::TearDown() {} + +/** + * @tc.name: ceil_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(CeilBuilderTest, ceil_build_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: ceil_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(CeilBuilderTest, ceil_build_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: ceil_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. 
+ * @tc.type: FUNC + */ +HWTEST_F(CeilBuilderTest, ceil_build_003, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: ceil_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(CeilBuilderTest, ceil_build_004, TestSize.Level1) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: ceil_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(CeilBuilderTest, ceil_build_005, TestSize.Level1) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: ceil_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(CeilBuilderTest, ceil_build_006, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: ceil_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(CeilBuilderTest, ceil_getprimitive_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: ceil_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(CeilBuilderTest, ceil_getprimitive_002, TestSize.Level1) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/crop_test.cpp b/test/unittest/ops/crop_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..370e9f9fbddf3d16ddcfb95ac2f8b61263fa1fe1 --- /dev/null +++ b/test/unittest/ops/crop_test.cpp @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/crop_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class CropBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveOffset(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetInputAndShape(); + +protected: + CropBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3, 4}; + std::vector m_inputDim {2, 3, 4, 5}; + std::vector m_shapeDim {1}; + std::vector m_outputDim {2, 3, 4, 5}; + std::vector m_axisDim {}; + std::vector m_offsetDim {1}; +}; + +void CropBuilderTest::SetUp() {} + +void CropBuilderTest::TearDown() {} + +void CropBuilderTest::SetInputAndShape() +{ + m_inputsIndex = m_inputs; + std::shared_ptr inputTensor; + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_inputDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + + std::shared_ptr shapeTensor; + shapeTensor = TransToNNTensor(OH_NN_FLOAT32, m_shapeDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(shapeTensor); +} + +void CropBuilderTest::SaveAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + 
std::shared_ptr axisTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t[1] {0}; + axisTensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); +} + +void CropBuilderTest::SaveOffset(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr offsetTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* offsetValue = new (std::nothrow) int64_t[1] {1}; + offsetTensor->SetBuffer(offsetValue, sizeof(int64_t)); + m_allTensors.emplace_back(offsetTensor); +} + +/** + * @tc.name: crop_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(CropBuilderTest, crop_build_001, TestSize.Level1) +{ + SetInputAndShape(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveAxis(OH_NN_INT64, m_axisDim, nullptr, OH_NN_CROP_AXIS); + SaveOffset(OH_NN_INT64, m_offsetDim, nullptr, OH_NN_CROP_OFFSET); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: crop_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(CropBuilderTest, crop_build_002, TestSize.Level1) +{ + SetInputAndShape(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveAxis(OH_NN_INT64, m_axisDim, nullptr, OH_NN_CROP_AXIS); + SaveOffset(OH_NN_INT64, m_offsetDim, nullptr, OH_NN_CROP_OFFSET); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: crop_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. 
+ * @tc.type: FUNC + */ +HWTEST_F(CropBuilderTest, crop_build_003, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + m_params = {4, 5}; + + SetInputAndShape(); + std::shared_ptr inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_inputDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveAxis(OH_NN_INT64, m_axisDim, nullptr, OH_NN_CROP_AXIS); + SaveOffset(OH_NN_INT64, m_offsetDim, nullptr, OH_NN_CROP_OFFSET); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: crop_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(CropBuilderTest, crop_build_004, TestSize.Level1) +{ + m_outputs = {2, 3}; + m_params = {4, 5}; + + SetInputAndShape(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveAxis(OH_NN_INT64, m_axisDim, nullptr, OH_NN_CROP_AXIS); + SaveOffset(OH_NN_INT64, m_offsetDim, nullptr, OH_NN_CROP_OFFSET); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: crop_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(CropBuilderTest, crop_build_005, TestSize.Level1) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: crop_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(CropBuilderTest, crop_build_006, TestSize.Level1) +{ + SetInputAndShape(); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: crop_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid axis's dataType. + * @tc.type: FUNC + */ +HWTEST_F(CropBuilderTest, crop_build_007, TestSize.Level1) +{ + SetInputAndShape(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_FLOAT32, m_axisDim, + nullptr, OH_NN_CROP_AXIS); + float* axisValue = new (std::nothrow) float [1]{0.0f}; + axisTensor->SetBuffer(&axisValue, sizeof(float)); + m_allTensors.emplace_back(axisTensor); + SaveOffset(OH_NN_INT64, m_offsetDim, nullptr, OH_NN_CROP_OFFSET); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + axisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: crop_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid offset's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(CropBuilderTest, crop_build_008, TestSize.Level1) +{ + SetInputAndShape(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SaveAxis(OH_NN_INT64, m_axisDim, nullptr, OH_NN_CROP_AXIS); + std::shared_ptr offsetTensor = TransToNNTensor(OH_NN_FLOAT32, m_offsetDim, + nullptr, OH_NN_CROP_OFFSET); + float* offsetValue = new (std::nothrow) float[1] {1.0f}; + int32_t offsetSize = 1; + EXPECT_NE(nullptr, offsetValue); + offsetTensor->SetBuffer(offsetValue, sizeof(float) * offsetSize); + m_allTensors.emplace_back(offsetTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + offsetTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: crop_build_009 + * @tc.desc: Verify that the build function returns a failed message with passing invalid dataType param. + * @tc.type: FUNC + */ +HWTEST_F(CropBuilderTest, crop_build_009, TestSize.Level1) +{ + SetInputAndShape(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveAxis(OH_NN_INT64, m_axisDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SaveOffset(OH_NN_INT64, m_offsetDim, nullptr, OH_NN_CROP_OFFSET); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: crop_build_010 + * @tc.desc: Verify that the build function returns a failed message with passing invalid value param. 
+ * @tc.type: FUNC + */ +HWTEST_F(CropBuilderTest, crop_build_010, TestSize.Level1) +{ + SetInputAndShape(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveAxis(OH_NN_INT64, m_axisDim, nullptr, OH_NN_CROP_AXIS); + SaveOffset(OH_NN_INT64, m_offsetDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: crop_build_011 + * @tc.desc: Verify that the build function returns a failed message without set buffer for dataType. + * @tc.type: FUNC + */ +HWTEST_F(CropBuilderTest, crop_build_011, TestSize.Level1) +{ + SetInputAndShape(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr dataTypeTensor = TransToNNTensor(OH_NN_INT64, m_axisDim, + nullptr, OH_NN_CROP_AXIS); + m_allTensors.emplace_back(dataTypeTensor); + SaveOffset(OH_NN_INT64, m_offsetDim, nullptr, OH_NN_CROP_OFFSET); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: crop_build_012 + * @tc.desc: Verify that the build function returns a failed message without set buffer for value. 
+ * @tc.type: FUNC + */ +HWTEST_F(CropBuilderTest, crop_build_012, TestSize.Level1) +{ + SetInputAndShape(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SaveAxis(OH_NN_INT64, m_axisDim, nullptr, OH_NN_CROP_AXIS); + std::shared_ptr valueTensor = TransToNNTensor(OH_NN_INT64, m_offsetDim, + nullptr, OH_NN_CROP_OFFSET); + m_allTensors.emplace_back(valueTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: crop_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(CropBuilderTest, crop_getprimitive_001, TestSize.Level1) +{ + SetInputAndShape(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveAxis(OH_NN_INT64, m_axisDim, nullptr, OH_NN_CROP_AXIS); + SaveOffset(OH_NN_INT64, m_offsetDim, nullptr, OH_NN_CROP_OFFSET); + + int64_t axisValue = 0; + std::vector offsetsValue = {1}; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnAxisValue = mindspore::lite::MindIR_Crop_GetAxis(primitive.get()); + EXPECT_EQ(returnAxisValue, axisValue); + auto returnOffsets = mindspore::lite::MindIR_Crop_GetOffsets(primitive.get()); + auto returnOffsetsSize = returnOffsets.size(); + for (size_t i = 0; i < returnOffsetsSize; ++i) { + EXPECT_EQ(returnOffsets[i], offsetsValue[i]); + } +} + +/** + * @tc.name: crop_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(CropBuilderTest, crop_getprimitive_002, TestSize.Level1) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/depth_to_space_test.cpp b/test/unittest/ops/depth_to_space_test.cpp index 77984f10e83aebdf0776208e9a3f7b9859de6662..c71e04eb3fc130df51bf9b20e595d9fb4c3bb531 100644 --- a/test/unittest/ops/depth_to_space_test.cpp +++ b/test/unittest/ops/depth_to_space_test.cpp @@ -16,7 +16,6 @@ #include "ops/depth_to_space_builder.h" #include "ops_test.h" -#include using namespace testing; using namespace testing::ext; @@ -33,8 +32,6 @@ public: protected: void SaveBlockSize(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); - void SaveFormat(OH_NN_DataType dataType, - const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); void SaveMode(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); @@ -42,13 +39,10 @@ protected: DepthToSpaceBuilder m_builder; std::vector m_inputs {0}; std::vector m_outputs {1}; - std::vector m_params {2, 3, 4}; + std::vector m_params {2, 3}; std::vector m_inputDim {1, 12, 1, 1}; std::vector m_outputDim {1, 3, 2, 2}; std::vector m_paramDim {}; - - std::string mode = "DCR"; - std::shared_ptr modeTensor; }; void DepthToSpaceBuilderTest::SetUp() {} @@ -59,31 +53,19 @@ void DepthToSpaceBuilderTest::SaveBlockSize(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) { std::shared_ptr blockSizeTensor = TransToNNTensor(dataType, dim, quantParam, type); - int64_t* blockSizeValue = new (std::nothrow) int64_t [1]{2}; + int64_t* blockSizeValue = new (std::nothrow) int64_t[1] {2}; EXPECT_NE(nullptr, blockSizeValue); 
blockSizeTensor->SetBuffer(blockSizeValue, sizeof(int64_t)); m_allTensors.emplace_back(blockSizeTensor); } -void DepthToSpaceBuilderTest::SaveFormat(OH_NN_DataType dataType, - const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) -{ - std::shared_ptr formatTensor = TransToNNTensor(dataType, dim, quantParam, type); - int8_t* formatValue = new (std::nothrow) int8_t(1); - EXPECT_NE(nullptr, formatValue); - - formatTensor->SetBuffer(formatValue, sizeof(int8_t)); - m_allTensors.emplace_back(formatTensor); -} - void DepthToSpaceBuilderTest::SaveMode(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) { - modeTensor = TransToNNTensor(dataType, dim, quantParam, type); - int8_t* modeValue = (int8_t*)(mode.c_str()); + std::shared_ptr modeTensor = TransToNNTensor(dataType, dim, quantParam, type); + int32_t* modeValue = new (std::nothrow) int32_t[1] {0}; EXPECT_NE(nullptr, modeValue); - - modeTensor->SetBuffer(modeValue, sizeof(int8_t)*(mode.length()+1)); + modeTensor->SetBuffer(modeValue, sizeof(int32_t)); m_allTensors.emplace_back(modeTensor); } @@ -97,12 +79,10 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_001, TestSize.Level1) SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); - SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); - SaveMode(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); + SaveMode(OH_NN_INT32, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_SUCCESS, ret); - modeTensor->SetBuffer(nullptr, 0); } /** @@ -115,13 +95,11 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_002, TestSize.Level1) SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); 
SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); - SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); - SaveMode(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); + SaveMode(OH_NN_INT32, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); - modeTensor->SetBuffer(nullptr, 0); } /** @@ -133,17 +111,15 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; - m_params = {3, 4, 5}; + m_params = {3, 4}; SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); - SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); - SaveMode(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); + SaveMode(OH_NN_INT32, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); - modeTensor->SetBuffer(nullptr, 0); } /** @@ -154,17 +130,15 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_003, TestSize.Level1) HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_004, TestSize.Level1) { m_outputs = {1, 2}; - m_params = {3, 4, 5}; + m_params = {3, 4}; SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); - SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); - SaveMode(OH_NN_INT8, m_paramDim, 
nullptr, OH_NN_DEPTH_TO_SPACE_MODE); + SaveMode(OH_NN_INT32, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); - modeTensor->SetBuffer(nullptr, 0); } /** @@ -203,57 +177,32 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_007, TestSize.Level1) std::shared_ptr blockSizeTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); - float* blockSizeValue = new (std::nothrow) float [1]{2.0f}; + float* blockSizeValue = new (std::nothrow) float[1] {2.0f}; EXPECT_NE(nullptr, blockSizeValue); blockSizeTensor->SetBuffer(blockSizeValue, sizeof(float)); m_allTensors.emplace_back(blockSizeTensor); - SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); - SaveMode(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); + SaveMode(OH_NN_INT32, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); blockSizeTensor->SetBuffer(nullptr, 0); - modeTensor->SetBuffer(nullptr, 0); } /** * @tc.name: depth_to_space_build_008 - * @tc.desc: Verify that the build function returns a failed message with invalid format's dataType. 
- * @tc.type: FUNC - */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_008, TestSize.Level1) -{ - SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); - SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); - - SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); - std::shared_ptr formatTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, - nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); - int64_t* formatValue = new (std::nothrow) int64_t(1); - formatTensor->SetBuffer(formatValue, sizeof(int64_t)); - m_allTensors.emplace_back(formatTensor); - SaveMode(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); - - OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); - EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); - formatTensor->SetBuffer(nullptr, 0); - modeTensor->SetBuffer(nullptr, 0); -} - -/** - * @tc.name: depth_to_space_build_009 * @tc.desc: Verify that the build function returns a failed message with invalid mode's dataType. 
* @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_009, TestSize.Level1) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_008, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); - SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); - modeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); - int64_t* modeValue = (int64_t*)(mode.c_str()); + std::shared_ptr modeTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_DEPTH_TO_SPACE_MODE); + float* modeValue = new (std::nothrow) float[1] {0.0f}; + EXPECT_NE(nullptr, modeValue); modeTensor->SetBuffer(modeValue, sizeof(modeValue)); m_allTensors.emplace_back(modeTensor); @@ -263,65 +212,43 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_009, TestSize.Level1) } /** - * @tc.name: depth_to_space_build_010 + * @tc.name: depth_to_space_build_009 * @tc.desc: Verify that the build function returns a failed message with passing invalid blockSize param. 
* @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_010, TestSize.Level1) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_009, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); - SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); - SaveMode(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); + SaveMode(OH_NN_INT32, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); - modeTensor->SetBuffer(nullptr, 0); } /** - * @tc.name: depth_to_space_build_011 - * @tc.desc: Verify that the build function returns a failed message with passing invalid format param. - * @tc.type: FUNC - */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_011, TestSize.Level1) -{ - SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); - SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); - SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); - SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); - SaveMode(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); - - OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); - EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); - modeTensor->SetBuffer(nullptr, 0); -} - -/** - * @tc.name: depth_to_space_build_012 + * @tc.name: depth_to_space_build_010 * @tc.desc: Verify that the build function returns a failed message with passing invalid mode param. 
* @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_012, TestSize.Level1) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_010, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); - SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); - SaveMode(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SaveMode(OH_NN_INT32, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); - modeTensor->SetBuffer(nullptr, 0); } /** - * @tc.name: depth_to_space_build_013 + * @tc.name: depth_to_space_build_011 * @tc.desc: Verify that the build function returns a failed message without set buffer for blockSize. * @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_013, TestSize.Level1) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_011, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); @@ -329,48 +256,25 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_013, TestSize.Level1) std::shared_ptr blockSizeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); m_allTensors.emplace_back(blockSizeTensor); - SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); - SaveMode(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); + SaveMode(OH_NN_INT32, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); - modeTensor->SetBuffer(nullptr, 0); } /** - * @tc.name: depth_to_space_build_014 - * @tc.desc: Verify that the 
build function returns a failed message without set buffer for format. - * @tc.type: FUNC - */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_014, TestSize.Level1) -{ - SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); - SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); - - SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); - std::shared_ptr formatTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, - nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); - m_allTensors.emplace_back(formatTensor); - SaveMode(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); - - OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); - EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); - modeTensor->SetBuffer(nullptr, 0); -} - -/** - * @tc.name: depth_to_space_build_015 + * @tc.name: depth_to_space_build_012 * @tc.desc: Verify that the build function returns a failed message without set buffer for mode. * @tc.type: FUNC */ -HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_015, TestSize.Level1) +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_012, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); - SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); - modeTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); + std::shared_ptr modeTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, + nullptr, OH_NN_DEPTH_TO_SPACE_MODE); m_allTensors.emplace_back(modeTensor); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); @@ -387,11 +291,9 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_getprimitive_001, TestSize.Leve SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, 
nullptr); SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); - SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); - SaveMode(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); + SaveMode(OH_NN_INT32, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_MODE); int64_t blockSizeValue = 2; - mindspore::lite::Format formatValue = mindspore::lite::FORMAT_NCHW; std::string modeValue = "DCR"; EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); @@ -401,11 +303,8 @@ HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_getprimitive_001, TestSize.Leve auto returnBlockSizeValue = mindspore::lite::MindIR_DepthToSpace_GetBlockSize(primitive.get()); EXPECT_EQ(returnBlockSizeValue, blockSizeValue); - mindspore::lite::Format returnFormatValue = mindspore::lite::MindIR_DepthToSpace_GetFormat(primitive.get()); - EXPECT_EQ(returnFormatValue, formatValue); auto returnModeValue = mindspore::lite::MindIR_DepthToSpace_GetMode(primitive.get()); EXPECT_EQ(returnModeValue, modeValue); - modeTensor->SetBuffer(nullptr, 0); } /** diff --git a/test/unittest/ops/detection_post_process_test.cpp b/test/unittest/ops/detection_post_process_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9afc12d934342f3ed0956a9ccf8e75225023515b --- /dev/null +++ b/test/unittest/ops/detection_post_process_test.cpp @@ -0,0 +1,1217 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/detection_post_process_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class DetectionPostProcessBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SetInputTensor(); + void SetInputSize(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetScale(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetNmsIoUThreshold(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetNmsScoreThreshold(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetMaxDetections(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetDetectionsPerClass(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetMaxClassesPerDetection(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetNumClasses(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetUseRegularNms(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetOutQuantized(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + DetectionPostProcessBuilder m_builder; + std::vector m_inputs {0, 1, 2}; + std::vector m_outputs 
{3, 4, 5, 6}; + std::vector m_params {7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; + std::vector m_inputBboxDim {1, 16}; + std::vector m_inputScoresDim {1, 4}; + std::vector m_inputAnchorsDim {1, 2, 8}; + std::vector m_outputDim {2, 3}; + std::vector m_paramDim {}; + std::vector m_scaleDim {4}; +}; + +void DetectionPostProcessBuilderTest::SetUp() {} + +void DetectionPostProcessBuilderTest::TearDown() {} + +void DetectionPostProcessBuilderTest::SetInputSize(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr inputSizeTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* inputSizeValue = new (std::nothrow) int64_t[1] {300}; + EXPECT_NE(nullptr, inputSizeValue); + inputSizeTensor->SetBuffer(inputSizeValue, sizeof(int64_t)); + m_allTensors.emplace_back(inputSizeTensor); +} + +void DetectionPostProcessBuilderTest::SetScale(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr scaleTensor = TransToNNTensor(dataType, dim, quantParam, type); + float* scaleValue = new (std::nothrow) float[4] {10.0, 10.0, 5.0, 5.0}; + int32_t scaleSize = 4; + EXPECT_NE(nullptr, scaleValue); + scaleTensor->SetBuffer(scaleValue, sizeof(float) * scaleSize); + m_allTensors.emplace_back(scaleTensor); +} + +void DetectionPostProcessBuilderTest::SetNmsIoUThreshold(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr nmsIouThresholdTensor = TransToNNTensor(dataType, dim, quantParam, type); + float* nmsIouThresholdValue = new (std::nothrow) float[1] {0.5}; + EXPECT_NE(nullptr, nmsIouThresholdValue); + nmsIouThresholdTensor->SetBuffer(nmsIouThresholdValue, sizeof(float)); + m_allTensors.emplace_back(nmsIouThresholdTensor); +} + +void DetectionPostProcessBuilderTest::SetNmsScoreThreshold(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* 
quantParam, OH_NN_TensorType type) +{ + std::shared_ptr nmsScoreThresholdTensor = TransToNNTensor(dataType, dim, quantParam, type); + float* nmsScoreThresholdValue = new (std::nothrow) float[1] {0.5}; + EXPECT_NE(nullptr, nmsScoreThresholdValue); + nmsScoreThresholdTensor->SetBuffer(nmsScoreThresholdValue, sizeof(float)); + m_allTensors.emplace_back(nmsScoreThresholdTensor); +} + +void DetectionPostProcessBuilderTest::SetMaxDetections(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr maxDetectionsTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* maxDetectionsValue = new (std::nothrow) int64_t[1] {5}; + EXPECT_NE(nullptr, maxDetectionsValue); + maxDetectionsTensor->SetBuffer(maxDetectionsValue, sizeof(int64_t)); + m_allTensors.emplace_back(maxDetectionsTensor); +} + +void DetectionPostProcessBuilderTest::SetDetectionsPerClass(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr detectionsPerClassTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* detectionsPerClassValue = new (std::nothrow) int64_t[1] {2}; + EXPECT_NE(nullptr, detectionsPerClassValue); + detectionsPerClassTensor->SetBuffer(detectionsPerClassValue, sizeof(int64_t)); + m_allTensors.emplace_back(detectionsPerClassTensor); +} + +void DetectionPostProcessBuilderTest::SetMaxClassesPerDetection(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr maxClassesPerDetectionTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* maxClassesPerDetectionValue = new (std::nothrow) int64_t[1] {1}; + EXPECT_NE(nullptr, maxClassesPerDetectionValue); + maxClassesPerDetectionTensor->SetBuffer(maxClassesPerDetectionValue, sizeof(int64_t)); + m_allTensors.emplace_back(maxClassesPerDetectionTensor); +} + +void 
DetectionPostProcessBuilderTest::SetNumClasses(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr numClassesTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* numClassesValue = new (std::nothrow) int64_t[1] {5}; + EXPECT_NE(nullptr, numClassesValue); + numClassesTensor->SetBuffer(numClassesValue, sizeof(int64_t)); + m_allTensors.emplace_back(numClassesTensor); +} + +void DetectionPostProcessBuilderTest::SetUseRegularNms(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr useRegularNmsTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool* useRegularNmsValue = new (std::nothrow) bool(false); + EXPECT_NE(nullptr, useRegularNmsValue); + useRegularNmsTensor->SetBuffer(useRegularNmsValue, sizeof(bool)); + m_allTensors.emplace_back(useRegularNmsTensor); +} + +void DetectionPostProcessBuilderTest::SetOutQuantized(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr outQuantizedTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool* outQuantizedValue = new (std::nothrow) bool(false); + EXPECT_NE(nullptr, outQuantizedValue); + outQuantizedTensor->SetBuffer(outQuantizedValue, sizeof(bool)); + m_allTensors.emplace_back(outQuantizedTensor); +} + +void DetectionPostProcessBuilderTest::SetInputTensor() +{ + m_inputsIndex = m_inputs; + std::shared_ptr bboxTensor; + bboxTensor = TransToNNTensor(OH_NN_FLOAT32, m_inputBboxDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(bboxTensor); + + std::shared_ptr scoresTensor; + scoresTensor = TransToNNTensor(OH_NN_FLOAT32, m_inputScoresDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(scoresTensor); + + std::shared_ptr anchorsTensor; + anchorsTensor = TransToNNTensor(OH_NN_FLOAT32, m_inputAnchorsDim, nullptr, OH_NN_TENSOR); + 
m_allTensors.emplace_back(anchorsTensor); +} + +/** + * @tc.name: detection_post_process_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_001, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: detection_post_process_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_002, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: detection_post_process_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_003, TestSize.Level1) +{ + m_inputs = {0, 1, 2, 3, 4}; + m_outputs = {5, 6, 7, 8}; + m_params = {9, 10, 11, 12, 13, 14, 15, 16, 17, 18}; + + SaveInputTensor({1}, OH_NN_FLOAT32, m_inputBboxDim, nullptr); + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_004, TestSize.Level1) +{ + m_outputs = {4, 5, 6, 7, 8}; + m_params = {9, 10, 11, 12, 13, 14, 15, 16, 17, 18}; + + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_005, TestSize.Level1) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_006, TestSize.Level1) +{ + SetInputTensor(); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid inputSize's dataType. + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_007, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr inputSizeTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + float* inputSizeValue = new (std::nothrow) float[1] {300.0f}; + inputSizeTensor->SetBuffer(inputSizeValue, sizeof(float)); + m_allTensors.emplace_back(inputSizeTensor); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + 
EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + inputSizeTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: detection_post_process_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid scale's dataType. + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_008, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + std::shared_ptr scaleTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + int64_t* scaleValue = new (std::nothrow) int64_t[4] {10.0f, 10.0f, 5.0f, 5.0f}; + int32_t scaleSize = 4; + scaleTensor->SetBuffer(scaleValue, sizeof(int64_t) * scaleSize); + m_allTensors.emplace_back(scaleTensor); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + scaleTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: detection_post_process_build_009 + * @tc.desc: Verify that the build function 
returns a failed message with invalid nmsIoUThreshold's dataType. + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_009, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + std::shared_ptr nmsIoUThresholdTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + int64_t* nmsIoUThresholdValue = new (std::nothrow) int64_t[1] {0}; + nmsIoUThresholdTensor->SetBuffer(nmsIoUThresholdValue, sizeof(int64_t)); + m_allTensors.emplace_back(nmsIoUThresholdTensor); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + nmsIoUThresholdTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: detection_post_process_build_010 + * @tc.desc: Verify that the build function returns a failed message with invalid nmsScoreThreshold's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_010, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + std::shared_ptr nmsScoreThresholdTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + int64_t* nmsScoreThresholdValue = new (std::nothrow) int64_t[1] {0}; + nmsScoreThresholdTensor->SetBuffer(nmsScoreThresholdValue, sizeof(int64_t)); + m_allTensors.emplace_back(nmsScoreThresholdTensor); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + nmsScoreThresholdTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: detection_post_process_build_011 + * @tc.desc: Verify that the build function returns a failed message with invalid maxDetections's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_011, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + std::shared_ptr maxDetectionsTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + float* maxDetectionsValue = new (std::nothrow) float[1] {5.0f}; + maxDetectionsTensor->SetBuffer(maxDetectionsValue, sizeof(float)); + m_allTensors.emplace_back(maxDetectionsTensor); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + maxDetectionsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: detection_post_process_build_012 + * @tc.desc: Verify that the build function returns a failed message with invalid detectionsPerClass's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_012, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + std::shared_ptr detectionsPerClassTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + float* detectionsPerClassValue = new (std::nothrow) float[1] {2.0f}; + detectionsPerClassTensor->SetBuffer(detectionsPerClassValue, sizeof(float)); + m_allTensors.emplace_back(detectionsPerClassTensor); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + detectionsPerClassTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: detection_post_process_build_013 + * @tc.desc: Verify that the build function returns a failed message with invalid maxClassesPerDetection's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_013, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + std::shared_ptr maxClassesPerDetectionTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + float* maxClassesPerDetectionValue = new (std::nothrow) float[2] {1.0f}; + maxClassesPerDetectionTensor->SetBuffer(maxClassesPerDetectionValue, sizeof(float)); + m_allTensors.emplace_back(maxClassesPerDetectionTensor); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + maxClassesPerDetectionTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: detection_post_process_build_014 + * @tc.desc: Verify that the build function returns a failed message with invalid numClasses's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_014, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + std::shared_ptr numClassesTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + float* numClassesValue = new (std::nothrow) float[1] {5.0f}; + numClassesTensor->SetBuffer(numClassesValue, sizeof(float)); + m_allTensors.emplace_back(numClassesTensor); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + numClassesTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: detection_post_process_build_015 + * @tc.desc: Verify that the build function returns a failed message with invalid useRegularNms's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_015, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + std::shared_ptr useRegularNmsTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + int64_t* useRegularNmsValue = new (std::nothrow) int64_t[1] {0}; + useRegularNmsTensor->SetBuffer(useRegularNmsValue, sizeof(int64_t)); + m_allTensors.emplace_back(useRegularNmsTensor); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + useRegularNmsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: detection_post_process_build_016 + * @tc.desc: Verify that the build function returns a failed message with invalid outQuantized's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_016, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + std::shared_ptr outQuantizedTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + int64_t* outQuantizedValue = new (std::nothrow) int64_t[1] {0}; + outQuantizedTensor->SetBuffer(outQuantizedValue, sizeof(int64_t)); + m_allTensors.emplace_back(outQuantizedTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + outQuantizedTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: detection_post_process_build_017 + * @tc.desc: Verify that the build function returns a failed message with passing invalid inputSize. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_017, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_018 + * @tc.desc: Verify that the build function returns a failed message with passing invalid scale. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_018, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_019 + * @tc.desc: Verify that the build function returns a failed message with passing invalid nmsIoUThreshold. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_019, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_020 + * @tc.desc: Verify that the build function returns a failed message with passing invalid nmsScoreThreshold. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_020, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_021 + * @tc.desc: Verify that the build function returns a failed message with passing invalid maxDetections. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_021, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_022 + * @tc.desc: Verify that the build function returns a failed message with passing invalid detectionsPerClass. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_022, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_023 + * @tc.desc: Verify that the build function returns a failed message with passing invalid maxClassesPerDetection. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_023, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_024 + * @tc.desc: Verify that the build function returns a failed message with passing invalid numClasses. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_024, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_025 + * @tc.desc: Verify that the build function returns a failed message with passing invalid useRegularNms. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_025, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_026 + * @tc.desc: Verify that the build function returns a failed message with passing invalid outQuantized. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_026, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_027 + * @tc.desc: Verify that the build function returns a failed message without set buffer for InputSize. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_027, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr inputSizeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + m_allTensors.emplace_back(inputSizeTensor); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_028 + * @tc.desc: Verify that the build function returns a failed message without set buffer for scale. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_028, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + std::shared_ptr scaleTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + m_allTensors.emplace_back(scaleTensor); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_029 + * @tc.desc: Verify that the build function returns a failed message without set buffer for nmsIoUThreshold. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_029, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + std::shared_ptr nmsIoUThresholdTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + m_allTensors.emplace_back(nmsIoUThresholdTensor); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_030 + * @tc.desc: Verify that the build function returns a failed message without set buffer for nmsScoreThreshold. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_030, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + std::shared_ptr nmsScoreThresholdTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + m_allTensors.emplace_back(nmsScoreThresholdTensor); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_031 + * @tc.desc: Verify that the build function returns a failed message without set buffer for maxDetections. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_031, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + std::shared_ptr maxDetectionsTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + m_allTensors.emplace_back(maxDetectionsTensor); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_032 + * @tc.desc: Verify that the build function returns a failed message without set buffer for detectionsPerClass. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_032, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + std::shared_ptr detectionsPerClassTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + m_allTensors.emplace_back(detectionsPerClassTensor); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_033 + * @tc.desc: Verify that the build function returns a failed message without set buffer for maxClassesPerDetection. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_033, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + std::shared_ptr maxClassesPerDetectionTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + m_allTensors.emplace_back(maxClassesPerDetectionTensor); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_034 + * @tc.desc: Verify that the build function returns a failed message without set buffer for numClasses. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_034, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + std::shared_ptr numClassesTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + m_allTensors.emplace_back(numClassesTensor); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_035 + * @tc.desc: Verify that the build function returns a failed message without set buffer for useRegularNms. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_035, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + std::shared_ptr useRegularNmsTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + m_allTensors.emplace_back(useRegularNmsTensor); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_build_036 + * @tc.desc: Verify that the build function returns a failed message without set buffer for outQuantized. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_build_036, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + std::shared_ptr outQuantizedTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + m_allTensors.emplace_back(outQuantizedTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: detection_post_process_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_getprimitive_001, TestSize.Level1) +{ + SetInputTensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetInputSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE); + SetScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_SCALE); + 
SetNmsIoUThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD); + SetNmsScoreThreshold(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD); + SetMaxDetections(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS); + SetDetectionsPerClass(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS); + SetMaxClassesPerDetection(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION); + SetNumClasses(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES); + SetUseRegularNms(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS); + SetOutQuantized(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED); + + int64_t inputSizeValue = 300; + std::vector scaleValue = {10.0f, 10.0f, 5.0f, 5.0f}; + float nmsIoUThresholdValue = 0.5f; + float nmsScoreThresholdValue = 0.5f; + int64_t maxDetectionsValue = 5; + int64_t detectionsPerClassValue = 2; + int64_t maxClassesPerDetectionValue = 1; + int64_t numClassesValue = 5; + bool useRegularNmsValue = false; + bool outQuantizedValue = false; + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnInputSize = mindspore::lite::MindIR_DetectionPostProcess_GetInputSize(primitive.get()); + EXPECT_EQ(returnInputSize, inputSizeValue); + auto returnScale = mindspore::lite::MindIR_DetectionPostProcess_GetScale(primitive.get()); + auto returnScaleSize = returnScale.size(); + for (size_t i = 0; i < returnScaleSize; ++i) { + EXPECT_EQ(returnScale[i], scaleValue[i]); + } + auto returnNmsIoUThresholdValue = mindspore::lite::MindIR_DetectionPostProcess_GetNmsIouThreshold(primitive.get()); + 
EXPECT_EQ(returnNmsIoUThresholdValue, nmsIoUThresholdValue); + auto returnNmsScoreThreshold = mindspore::lite::MindIR_DetectionPostProcess_GetNmsScoreThreshold(primitive.get()); + EXPECT_EQ(returnNmsScoreThreshold, nmsScoreThresholdValue); + auto returnMaxDetections = mindspore::lite::MindIR_DetectionPostProcess_GetMaxDetections(primitive.get()); + EXPECT_EQ(returnMaxDetections, maxDetectionsValue); + auto returnDetectionsPerClass = + mindspore::lite::MindIR_DetectionPostProcess_GetDetectionsPerClass(primitive.get()); + EXPECT_EQ(returnDetectionsPerClass, detectionsPerClassValue); + auto returnMaxClassesPerDetection = + mindspore::lite::MindIR_DetectionPostProcess_GetMaxClassesPerDetection(primitive.get()); + EXPECT_EQ(returnMaxClassesPerDetection, maxClassesPerDetectionValue); + auto returnNumClasses = mindspore::lite::MindIR_DetectionPostProcess_GetNumClasses(primitive.get()); + EXPECT_EQ(returnNumClasses, numClassesValue); + auto returnUseRegularNms = mindspore::lite::MindIR_DetectionPostProcess_GetUseRegularNms(primitive.get()); + EXPECT_EQ(returnUseRegularNms, useRegularNmsValue); + auto returnOutQuantized = mindspore::lite::MindIR_DetectionPostProcess_GetOutQuantized(primitive.get()); + EXPECT_EQ(returnOutQuantized, outQuantizedValue); +} + +/** + * @tc.name: detection_post_process_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessBuilderTest, detection_post_process_getprimitive_002, TestSize.Level1) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/floor_test.cpp b/test/unittest/ops/floor_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..47740a9fe308625632fb7575fee5d860453caebe --- /dev/null +++ b/test/unittest/ops/floor_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/floor_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class FloorBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + FloorBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void FloorBuilderTest::SetUp() {} + +void FloorBuilderTest::TearDown() {} + +/** + * @tc.name: floor_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(FloorBuilderTest, floor_build_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: floor_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(FloorBuilderTest, floor_build_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: floor_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(FloorBuilderTest, floor_build_003, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: floor_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
+ * @tc.type: FUNC + */ +HWTEST_F(FloorBuilderTest, floor_build_004, TestSize.Level1) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: floor_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(FloorBuilderTest, floor_build_005, TestSize.Level1) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: floor_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(FloorBuilderTest, floor_build_006, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: floor_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(FloorBuilderTest, floor_getprimitive_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: floor_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(FloorBuilderTest, floor_getprimitive_002, TestSize.Level1) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/fullconnection_test.cpp b/test/unittest/ops/fullconnection_test.cpp index ef2394c58989fc2aa52ad7a26746c295a473cece..65d37bd0aa55b4b938757b5bde8f3cba18bc6a83 100644 --- a/test/unittest/ops/fullconnection_test.cpp +++ b/test/unittest/ops/fullconnection_test.cpp @@ -33,12 +33,14 @@ public: void SetInputToAlltensor(); void SetActivation(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetHasBias(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); public: FullConnectionBuilder m_builder; std::vector m_inputs {0, 1, 2}; std::vector m_outputs {3}; - std::vector m_params {4}; + std::vector m_params {4, 5}; std::vector m_output_dim {2, 2}; std::vector m_param_dim {}; }; @@ -81,6 +83,17 @@ void FullConnectionBuilderTest::SetActivation(OH_NN_DataType dataType, m_allTensors.emplace_back(tensor); } +void FullConnectionBuilderTest::SetHasBias(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + bool* hasBiasValue = new (std::nothrow) bool (true); + EXPECT_NE(nullptr, hasBiasValue); + + tensor->SetBuffer(hasBiasValue, sizeof(bool)); + m_allTensors.emplace_back(tensor); +} + /** * @tc.name: fullconnection_build_001 * @tc.desc: Verify the success of the build function @@ -94,6 +107,8 @@ HWTEST_F(FullConnectionBuilderTest, fullconnection_build_001, TestSize.Level1) SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); SetActivation(OH_NN_INT8, m_param_dim, nullptr, 
OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -110,6 +125,8 @@ HWTEST_F(FullConnectionBuilderTest, fullconnection_build_002, TestSize.Level1) SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -122,13 +139,15 @@ HWTEST_F(FullConnectionBuilderTest, fullconnection_build_002, TestSize.Level1) HWTEST_F(FullConnectionBuilderTest, fullconnection_build_003, TestSize.Level1) { m_outputs = {}; - m_params = {3}; + m_params = {3, 4}; m_inputsIndex = m_inputs; m_paramsIndex = m_params; SetInputToAlltensor(); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -141,7 +160,7 @@ HWTEST_F(FullConnectionBuilderTest, fullconnection_build_004, TestSize.Level1) { m_inputs = {0, 1, 6}; m_outputs = {3}; - m_params = {4}; + m_params = {4, 5}; m_inputsIndex = m_inputs; m_paramsIndex = m_params; @@ -149,6 +168,8 @@ HWTEST_F(FullConnectionBuilderTest, fullconnection_build_004, TestSize.Level1) SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, 
OH_NN_FULL_CONNECTION_HAS_BIAS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -172,6 +193,7 @@ HWTEST_F(FullConnectionBuilderTest, fullconnection_build_005, TestSize.Level1) tensor->SetBuffer(activationValue, sizeof(int32_t)); m_allTensors.emplace_back(tensor); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -182,6 +204,30 @@ HWTEST_F(FullConnectionBuilderTest, fullconnection_build_005, TestSize.Level1) */ HWTEST_F(FullConnectionBuilderTest, fullconnection_build_006, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_FULL_CONNECTION_HAS_BIAS); + int32_t *hasBiasValue = new (std::nothrow) int32_t(1); + EXPECT_NE(nullptr, hasBiasValue); + + tensor->SetBuffer(hasBiasValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_008 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ + +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_008, TestSize.Level1) { m_param_dim = {2}; m_inputsIndex = m_inputs; @@ -196,16 +242,17 @@ HWTEST_F(FullConnectionBuilderTest, fullconnection_build_006, TestSize.Level1) tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); m_allTensors.emplace_back(tensor); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, 
m_allTensors)); } /** - * @tc.name: fullconnection_build_007 + * @tc.name: fullconnection_build_009 * @tc.desc: Verify the invalid avtivation value of the build function * @tc.type: FUNC */ -HWTEST_F(FullConnectionBuilderTest, fullconnection_build_007, TestSize.Level1) +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_009, TestSize.Level1) { m_inputsIndex = m_inputs; m_paramsIndex = m_params; @@ -219,16 +266,17 @@ HWTEST_F(FullConnectionBuilderTest, fullconnection_build_007, TestSize.Level1) tensor->SetBuffer(activationValue, sizeof(int8_t)); m_allTensors.emplace_back(tensor); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } /** - * @tc.name: fullconnection_build_008 + * @tc.name: fullconnection_build_010 * @tc.desc: Verify the invalid param to fullconnection of the build function * @tc.type: FUNC */ -HWTEST_F(FullConnectionBuilderTest, fullconnection_build_008, TestSize.Level1) +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_010, TestSize.Level1) { m_inputsIndex = m_inputs; m_paramsIndex = m_params; @@ -242,6 +290,25 @@ HWTEST_F(FullConnectionBuilderTest, fullconnection_build_008, TestSize.Level1) tensor->SetBuffer(activationValue, sizeof(int8_t)); m_allTensors.emplace_back(tensor); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_011 + * @tc.desc: Verify the invalid avtivation value of the build function + * @tc.type: FUNC + */ + +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_011, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, 
OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_LOG_SOFTMAX_AXIS); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -258,6 +325,7 @@ HWTEST_F(FullConnectionBuilderTest, fullconnection_getprimitive_001, TestSize.Le SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; @@ -265,6 +333,8 @@ HWTEST_F(FullConnectionBuilderTest, fullconnection_getprimitive_001, TestSize.Le int8_t activationReturn = mindspore::lite::MindIR_FullConnection_GetActivationType(primitive.get()); EXPECT_EQ(activationReturn, 0); + bool hasBiasReturn = mindspore::lite::MindIR_FullConnection_GetHasBias(primitive.get()); + EXPECT_EQ(hasBiasReturn, true); } /** @@ -280,6 +350,7 @@ HWTEST_F(FullConnectionBuilderTest, fullconnection_getprimitive_002, TestSize.Le SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; EXPECT_EQ(expectPrimitive, primitive); diff --git a/test/unittest/ops/fullconnection_with_axis_test.cpp b/test/unittest/ops/fullconnection_with_axis_test.cpp index 4b72a90ef4d622bfeec34753ea95f4e336217eae..1033a8d610d2cabc3e21fdea405ccdd2c0d3979f 100644 --- a/test/unittest/ops/fullconnection_with_axis_test.cpp +++ b/test/unittest/ops/fullconnection_with_axis_test.cpp 
@@ -32,14 +32,18 @@ public: void SetInputToAlltensor(); void SetActivation(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); - void SeAxis(OH_NN_DataType dataType, + void SetAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetUseAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetHasBias(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); public: FullConnectionBuilder m_builder; std::vector m_inputs {0, 1, 2}; std::vector m_outputs {3}; - std::vector m_params {4, 5}; + std::vector m_params {4, 5, 6, 7}; std::vector m_output_dim {2, 2}; std::vector m_param_dim {}; }; @@ -80,7 +84,7 @@ void FullConnectionAxisBuilderTest::SetActivation(OH_NN_DataType dataType, m_allTensors.emplace_back(tensor); } -void FullConnectionAxisBuilderTest::SeAxis(OH_NN_DataType dataType, +void FullConnectionAxisBuilderTest::SetAxis(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) { std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); @@ -91,6 +95,28 @@ void FullConnectionAxisBuilderTest::SeAxis(OH_NN_DataType dataType, m_allTensors.emplace_back(tensor); } +void FullConnectionAxisBuilderTest::SetUseAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + bool* useAxisValue = new (std::nothrow) bool (true); + EXPECT_NE(nullptr, useAxisValue); + + tensor->SetBuffer(useAxisValue, sizeof(bool)); + m_allTensors.emplace_back(tensor); +} + +void FullConnectionAxisBuilderTest::SetHasBias(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = 
TransToNNTensor(dataType, dim, quantParam, type); + bool* hasBiasValue = new (std::nothrow) bool (true); + EXPECT_NE(nullptr, hasBiasValue); + + tensor->SetBuffer(hasBiasValue, sizeof(bool)); + m_allTensors.emplace_back(tensor); +} + /** * @tc.name: fullconnection_build_axis_001 * @tc.desc: Verify the behavior of the build function @@ -103,8 +129,11 @@ HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_001, TestSize. SetInputToAlltensor(); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); - SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetUseAxis(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_USE_AXIS); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -120,8 +149,11 @@ HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_002, TestSize. SetInputToAlltensor(); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); - SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetUseAxis(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_USE_AXIS); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -137,10 +169,13 @@ HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_003, TestSize. 
m_inputsIndex = m_inputs; m_paramsIndex = m_params; SetInputToAlltensor(); - SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); - SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetUseAxis(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_USE_AXIS); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -151,14 +186,17 @@ HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_003, TestSize. */ HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_004, TestSize.Level1) { - m_inputs = {0, 1, 6}; + m_inputs = {0, 1, 8}; m_inputsIndex = m_inputs; m_paramsIndex = m_params; SetInputToAlltensor(); - SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); - SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetUseAxis(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_USE_AXIS); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -172,15 +210,17 @@ HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_005, TestSize. 
m_inputsIndex = m_inputs; m_paramsIndex = m_params; SetInputToAlltensor(); - SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); int32_t *axisValueTest = new (std::nothrow) int32_t(0); EXPECT_NE(nullptr, axisValueTest); - tensor->SetBuffer(axisValueTest, sizeof(int32_t)); m_allTensors.emplace_back(tensor); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetUseAxis(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_USE_AXIS); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -194,16 +234,18 @@ HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_006, TestSize. m_inputsIndex = m_inputs; m_paramsIndex = m_params; SetInputToAlltensor(); - SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); - SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); int32_t *activationValue = new (std::nothrow) int32_t(0); EXPECT_NE(nullptr, activationValue); - tensor->SetBuffer(activationValue, sizeof(int32_t)); m_allTensors.emplace_back(tensor); + SetUseAxis(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_USE_AXIS); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -213,6 +255,56 @@ HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_006, TestSize. 
* @tc.type: FUNC */ HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_007, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_FULL_CONNECTION_USE_AXIS); + int32_t *useAxisValue = new (std::nothrow) int32_t(1); + EXPECT_NE(nullptr, useAxisValue); + tensor->SetBuffer(useAxisValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_008 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_008, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetUseAxis(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_USE_AXIS); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_FULL_CONNECTION_HAS_BIAS); + int32_t *hasBiasValue = new (std::nothrow) int32_t(1); + EXPECT_NE(nullptr, hasBiasValue); + tensor->SetBuffer(hasBiasValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: 
fullconnection_build_axis_009 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_009, TestSize.Level1) { std::vector paramDimTest = {2}; m_inputsIndex = m_inputs; @@ -220,75 +312,211 @@ HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_007, TestSize. SetInputToAlltensor(); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, paramDimTest, nullptr, OH_NN_FULL_CONNECTION_AXIS); int64_t *axisValueTest = new (std::nothrow) int64_t[2]{0, 0}; EXPECT_NE(nullptr, axisValueTest); - tensor->SetBuffer(axisValueTest, 2 * sizeof(int64_t)); m_allTensors.emplace_back(tensor); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetUseAxis(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_USE_AXIS); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } /** - * @tc.name: fullconnection_build_axis_008 + * @tc.name: fullconnection_build_axis_010 * @tc.desc: Verify the behavior of the build function * @tc.type: FUNC */ -HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_008, TestSize.Level1) +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_010, TestSize.Level1) { std::vector paramDimTest = {2}; m_inputsIndex = m_inputs; m_paramsIndex = m_params; SetInputToAlltensor(); - SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); - SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, paramDimTest, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); int8_t *activationValue = new (std::nothrow) int8_t[2]{0, 0}; EXPECT_NE(nullptr, activationValue); 
- tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); m_allTensors.emplace_back(tensor); + SetUseAxis(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_USE_AXIS); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } /** - * @tc.name: fullconnection_build_axis_009 - * @tc.desc: Verify the fullconnection without set axis of the build function + * @tc.name: fullconnection_build_axis_011 + * @tc.desc: Verify the behavior of the build function * @tc.type: FUNC */ -HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_009, TestSize.Level1) +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_011, TestSize.Level1) { + std::vector paramDimTest = {2}; m_inputsIndex = m_inputs; m_paramsIndex = m_params; SetInputToAlltensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_BOOL, paramDimTest, nullptr, + OH_NN_FULL_CONNECTION_USE_AXIS); + bool *useAxisValue = new (std::nothrow) bool[2] {true, true}; + EXPECT_NE(nullptr, useAxisValue); + tensor->SetBuffer(useAxisValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_012 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_012, TestSize.Level1) +{ + std::vector paramDimTest = {2}; + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); 
SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetUseAxis(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_USE_AXIS); + std::shared_ptr tensor = TransToNNTensor(OH_NN_BOOL, paramDimTest, nullptr, + OH_NN_FULL_CONNECTION_HAS_BIAS); + bool *hasBiasValue = new (std::nothrow) bool[2] {true, true}; + EXPECT_NE(nullptr, hasBiasValue); + tensor->SetBuffer(hasBiasValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(tensor); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_013 + * @tc.desc: Verify the fullconnection without set axis of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_013, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + + SetInputToAlltensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); m_allTensors.emplace_back(tensor); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetUseAxis(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_USE_AXIS); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } /** - * @tc.name: fullconnection_build_axis_010 + * @tc.name: fullconnection_build_axis_014 * @tc.desc: Verify the fullconnection without set activation of the build function * @tc.type: FUNC */ -HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_010, TestSize.Level1) +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_014, 
TestSize.Level1) { m_inputsIndex = m_inputs; m_paramsIndex = m_params; - SetInputToAlltensor(); + SetInputToAlltensor(); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); - SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); m_allTensors.emplace_back(tensor); + SetUseAxis(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_USE_AXIS); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_015 + * @tc.desc: Verify the fullconnection without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_015, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + + SetInputToAlltensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_BOOL, m_param_dim, nullptr, + OH_NN_FULL_CONNECTION_USE_AXIS); + m_allTensors.emplace_back(tensor); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_016 + * @tc.desc: Verify the fullconnection without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_016, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + + 
SetInputToAlltensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetUseAxis(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_USE_AXIS); + std::shared_ptr tensor = TransToNNTensor(OH_NN_BOOL, m_param_dim, nullptr, + OH_NN_FULL_CONNECTION_HAS_BIAS); + m_allTensors.emplace_back(tensor); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_017 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_017, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_BOOL, m_param_dim, nullptr, + OH_NN_FULL_CONNECTION_USE_AXIS); + bool *useAxisValue = new (std::nothrow) bool(false); + EXPECT_NE(nullptr, useAxisValue); + tensor->SetBuffer(useAxisValue, sizeof(bool)); + m_allTensors.emplace_back(tensor); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -303,8 +531,12 @@ HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_getprimitive_axis_001, Te m_paramsIndex = m_params; SetInputToAlltensor(); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); - SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); 
SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + SetUseAxis(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_USE_AXIS); + SetHasBias(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_HAS_BIAS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); @@ -315,6 +547,10 @@ HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_getprimitive_axis_001, Te EXPECT_EQ(returnValue, 0); bool activationReturn = mindspore::lite::MindIR_FullConnection_GetActivationType(primitive.get()); EXPECT_EQ(activationReturn, 0); + bool useAxisReturn = mindspore::lite::MindIR_FullConnection_GetUseAxis(primitive.get()); + EXPECT_EQ(useAxisReturn, true); + bool hasBiasReturn = mindspore::lite::MindIR_FullConnection_GetHasBias(primitive.get()); + EXPECT_EQ(hasBiasReturn, true); } /** @@ -329,7 +565,7 @@ HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_getprimitive_axis_002, Te SetInputToAlltensor(); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); - SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); diff --git a/test/unittest/ops/gelu_builder_test.cpp b/test/unittest/ops/gelu_builder_test.cpp index 46deb77c88440d740caa7c889c41c092de4308df..483e1c58e59fe90f2618710b95bf58874c460602 100644 --- a/test/unittest/ops/gelu_builder_test.cpp +++ b/test/unittest/ops/gelu_builder_test.cpp @@ -29,19 +29,34 @@ public: void SetUp() override; void TearDown() override; +protected: + void SetApproximate(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + protected: GeluBuilder m_gelu; std::vector m_inputs {0}; std::vector m_outputs {1}; - 
std::vector m_params {}; + std::vector m_params {2}; std::vector m_inputDim {1, 5, 1, 1}; std::vector m_outputDim {1, 5, 1, 1}; + std::vector m_paramsDim {}; }; void GeluBuilderTest::SetUp() {} void GeluBuilderTest::TearDown() {} +void GeluBuilderTest::SetApproximate(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr outQuantizedTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool* outQuantizedValue = new (std::nothrow) bool(false); + EXPECT_NE(nullptr, outQuantizedValue); + outQuantizedTensor->SetBuffer(outQuantizedValue, sizeof(bool)); + m_allTensors.emplace_back(outQuantizedTensor); +} + /** * @tc.name: gelu_build_001 * @tc.desc: Verify that the build function returns a successful message. @@ -51,6 +66,7 @@ HWTEST_F(GeluBuilderTest, gelu_build_001, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetApproximate(OH_NN_BOOL, m_paramsDim, nullptr, OH_NN_GELU_APPROXIMATE); OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_SUCCESS, ret); @@ -65,6 +81,7 @@ HWTEST_F(GeluBuilderTest, gelu_build_002, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetApproximate(OH_NN_BOOL, m_paramsDim, nullptr, OH_NN_GELU_APPROXIMATE); EXPECT_EQ(OH_NN_SUCCESS, m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); @@ -80,9 +97,11 @@ HWTEST_F(GeluBuilderTest, gelu_build_003, TestSize.Level0) { m_inputs = {0, 1}; m_outputs = {2}; + m_params = {3}; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetApproximate(OH_NN_BOOL, m_paramsDim, nullptr, 
OH_NN_GELU_APPROXIMATE); OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -96,9 +115,11 @@ HWTEST_F(GeluBuilderTest, gelu_build_003, TestSize.Level0) HWTEST_F(GeluBuilderTest, gelu_build_004, TestSize.Level0) { std::vector m_outputs = {1, 2}; + m_params = {3}; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetApproximate(OH_NN_BOOL, m_paramsDim, nullptr, OH_NN_GELU_APPROXIMATE); OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -135,7 +156,7 @@ HWTEST_F(GeluBuilderTest, gelu_build_006, TestSize.Level0) */ HWTEST_F(GeluBuilderTest, gelu_build_007, TestSize.Level0) { - m_params = {2}; + m_params = {2, 3}; std::vector paramDim = {}; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); @@ -143,6 +164,62 @@ HWTEST_F(GeluBuilderTest, gelu_build_007, TestSize.Level0) std::shared_ptr paramTensor; paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); m_allTensors.emplace_back(paramTensor); + SetApproximate(OH_NN_BOOL, m_paramsDim, nullptr, OH_NN_GELU_APPROXIMATE); + + OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gelu_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid approximate's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_build_008, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr approximateTensor = TransToNNTensor(OH_NN_INT64, m_paramsDim, + nullptr, OH_NN_GELU_APPROXIMATE); + int64_t* approximateValue = new (std::nothrow) int64_t[1] {0}; + EXPECT_NE(nullptr, approximateValue); + approximateTensor->SetBuffer(approximateValue, sizeof(int64_t)); + m_allTensors.emplace_back(approximateTensor); + + OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + approximateTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: gelu_build_009 + * @tc.desc: Verify that the build function returns a failed message with passing invalid approximate param. + * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetApproximate(OH_NN_BOOL, m_paramsDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gelu_build_011 + * @tc.desc: Verify that the build function returns a failed message without set buffer for approximate. 
+ * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_build_011, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + std::shared_ptr approximateTensor = TransToNNTensor(OH_NN_BOOL, m_paramsDim, + nullptr, OH_NN_GELU_APPROXIMATE); + m_allTensors.emplace_back(approximateTensor); OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -157,7 +234,9 @@ HWTEST_F(GeluBuilderTest, gelu_getprimitive_001, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetApproximate(OH_NN_BOOL, m_paramsDim, nullptr, OH_NN_GELU_APPROXIMATE); + bool approximateValue = false; EXPECT_EQ(OH_NN_SUCCESS, m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); LiteGraphPrimitvePtr primitive = m_gelu.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); @@ -166,6 +245,8 @@ HWTEST_F(GeluBuilderTest, gelu_getprimitive_001, TestSize.Level0) mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_GELU; auto returnValue = mindspore::lite::MindIR_Activation_GetActivationType(primitive.get()); EXPECT_EQ(returnValue, activationType); + auto returnApproximateValue = mindspore::lite::MindIR_Activation_GetApproximate(primitive.get()); + EXPECT_EQ(returnApproximateValue, approximateValue); } /** diff --git a/test/unittest/ops/l2_normalize_test.cpp b/test/unittest/ops/l2_normalize_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9b11b7cd6e790c773f4c2a76759d519f66988ce7 --- /dev/null +++ b/test/unittest/ops/l2_normalize_test.cpp @@ -0,0 +1,398 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/l2_normalize_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class L2NormalizeBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SetAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetEpsilon(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + L2NormalizeBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_params {2, 3, 4}; + std::vector m_inputDim {2, 3}; + std::vector m_outputDim {2, 3}; + std::vector m_paramDim {1}; +}; + +void L2NormalizeBuilderTest::SetUp() {} + +void L2NormalizeBuilderTest::TearDown() {} + +void L2NormalizeBuilderTest::SetAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr axisTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t[1] {1}; + EXPECT_NE(nullptr, axisValue); + axisTensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); +} + +void 
L2NormalizeBuilderTest::SetEpsilon(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr epsilonTensor = TransToNNTensor(dataType, dim, quantParam, type); + float* epsilonValue = new (std::nothrow) float[1] {0.0f}; + EXPECT_NE(nullptr, epsilonValue); + epsilonTensor->SetBuffer(epsilonValue, sizeof(float)); + m_allTensors.emplace_back(epsilonTensor); +} + +/** + * @tc.name: l2_normalize_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeBuilderTest, l2_normalize_build_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetAxis(OH_NN_INT64, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_AXIS); + SetEpsilon(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_EPSILON); + SetActivation(OH_NN_INT8, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: l2_normalize_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
+ * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeBuilderTest, l2_normalize_build_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetAxis(OH_NN_INT64, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_AXIS); + SetEpsilon(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_EPSILON); + SetActivation(OH_NN_INT8, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_ACTIVATION_TYPE); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: l2_normalize_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalid input. + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeBuilderTest, l2_normalize_build_003, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + m_params = {3, 4, 5}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetAxis(OH_NN_INT64, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_AXIS); + SetEpsilon(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_EPSILON); + SetActivation(OH_NN_INT8, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: l2_normalize_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalid output. 
+ * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeBuilderTest, l2_normalize_build_004, TestSize.Level1) +{ + m_outputs = {1, 2}; + m_params = {3, 4, 5}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetAxis(OH_NN_INT64, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_AXIS); + SetEpsilon(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_EPSILON); + SetActivation(OH_NN_INT8, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: l2_normalize_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeBuilderTest, l2_normalize_build_005, TestSize.Level1) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: l2_normalize_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeBuilderTest, l2_normalize_build_006, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: l2_normalize_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid axis's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeBuilderTest, l2_normalize_build_007, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_L2_NORMALIZE_AXIS); + float* axisValue = new (std::nothrow) float[1] {1.0f}; + axisTensor->SetBuffer(axisValue, sizeof(float)); + m_allTensors.emplace_back(axisTensor); + SetEpsilon(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_EPSILON); + SetActivation(OH_NN_INT8, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + axisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: l2_normalize_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid epsilon's dataType. + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeBuilderTest, l2_normalize_build_008, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetAxis(OH_NN_INT64, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_AXIS); + std::shared_ptr epsilonTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_L2_NORMALIZE_EPSILON); + int64_t* epsilonValue = new (std::nothrow) int64_t[1] {0}; + epsilonTensor->SetBuffer(epsilonValue, sizeof(int64_t)); + m_allTensors.emplace_back(epsilonTensor); + SetActivation(OH_NN_INT8, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + epsilonTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: l2_normalize_build_009 + * @tc.desc: Verify that the build function returns a failed message with invalid activationType's 
dataType. + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeBuilderTest, l2_normalize_build_009, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetAxis(OH_NN_INT64, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_AXIS); + SetEpsilon(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_EPSILON); + std::shared_ptr activationTypeTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_L2_NORMALIZE_ACTIVATION_TYPE); + float* activationTypeValue = new (std::nothrow) float[1] {0.0f}; + activationTypeTensor->SetBuffer(activationTypeValue, sizeof(float)); + m_allTensors.emplace_back(activationTypeTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTypeTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: l2_normalize_build_010 + * @tc.desc: Verify that the build function returns a failed message with passing invalid axis. + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeBuilderTest, l2_normalize_build_010, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetAxis(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SetEpsilon(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_EPSILON); + SetActivation(OH_NN_INT8, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: l2_normalize_build_011 + * @tc.desc: Verify that the build function returns a failed message with passing invalid epsilon. 
+ * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeBuilderTest, l2_normalize_build_011, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetAxis(OH_NN_INT64, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_AXIS); + SetEpsilon(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SetActivation(OH_NN_INT8, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: l2_normalize_build_012 + * @tc.desc: Verify that the build function returns a failed message with passing invalid activationType. + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeBuilderTest, l2_normalize_build_012, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetAxis(OH_NN_INT64, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_AXIS); + SetEpsilon(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_EPSILON); + SetActivation(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: l2_normalize_build_013 + * @tc.desc: Verify that the build function returns a failed message without set buffer for axis. 
+ * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeBuilderTest, l2_normalize_build_013, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_L2_NORMALIZE_AXIS); + m_allTensors.emplace_back(axisTensor); + SetEpsilon(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_EPSILON); + SetActivation(OH_NN_INT8, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: l2_normalize_build_014 + * @tc.desc: Verify that the build function returns a failed message without set buffer for epsilon. + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeBuilderTest, l2_normalize_build_014, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetAxis(OH_NN_INT64, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_AXIS); + std::shared_ptr epsilonTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_L2_NORMALIZE_EPSILON); + m_allTensors.emplace_back(epsilonTensor); + SetActivation(OH_NN_INT8, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: l2_normalize_build_015 + * @tc.desc: Verify that the build function returns a failed message without set buffer for activationType. 
+ * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeBuilderTest, l2_normalize_build_015, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetAxis(OH_NN_INT64, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_AXIS); + SetEpsilon(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_EPSILON); + std::shared_ptr shapeTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, + nullptr, OH_NN_L2_NORMALIZE_ACTIVATION_TYPE); + m_allTensors.emplace_back(shapeTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: l2_normalize_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeBuilderTest, l2_normalize_getprimitive_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetAxis(OH_NN_INT64, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_AXIS); + SetEpsilon(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_EPSILON); + SetActivation(OH_NN_INT8, m_paramDim, nullptr, OH_NN_L2_NORMALIZE_ACTIVATION_TYPE); + + std::vector axisValue {1}; + float epsilonValue {0.0f}; + mindspore::lite::ActivationType activationTypeValue {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnAxis = mindspore::lite::MindIR_L2NormalizeFusion_GetAxis(primitive.get()); + auto returnAxisSize = returnAxis.size(); + for (size_t i = 0; i < returnAxisSize; ++i) { + EXPECT_EQ(returnAxis[i], axisValue[i]); + } + auto returnEpsilon = 
mindspore::lite::MindIR_L2NormalizeFusion_GetEpsilon(primitive.get()); + EXPECT_EQ(returnEpsilon, epsilonValue); + mindspore::lite::ActivationType returnActivationValue = + mindspore::lite::MindIR_L2NormalizeFusion_GetActivationType(primitive.get()); + EXPECT_EQ(returnActivationValue, activationTypeValue); +} + +/** + * @tc.name: l2_normalize_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeBuilderTest, l2_normalize_getprimitive_002, TestSize.Level1) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/layernorm_builder_test.cpp b/test/unittest/ops/layernorm_builder_test.cpp index 33f234d9e05b2429349b358dc41f2ef0127bc80f..fbe678d70b6b433ec871f51bf886898d366f983a 100644 --- a/test/unittest/ops/layernorm_builder_test.cpp +++ b/test/unittest/ops/layernorm_builder_test.cpp @@ -58,10 +58,10 @@ void LayerNormBuilderTest::TearDown() {} void LayerNormBuilderTest::SaveNormAixsTensor(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) { - int32_t* beginNormAxisValue = new (std::nothrow) int32_t(1); + int64_t* beginNormAxisValue = new (std::nothrow) int64_t(1); EXPECT_NE(nullptr, beginNormAxisValue); std::shared_ptr normAxisTensor = TransToNNTensor(dataType, dim, quantParam, type); - normAxisTensor->SetBuffer(beginNormAxisValue, sizeof(int32_t)); + normAxisTensor->SetBuffer(beginNormAxisValue, sizeof(int64_t)); m_allTensors.emplace_back(normAxisTensor); } @@ -78,10 +78,10 @@ void LayerNormBuilderTest::SaveEpsilonTensor(OH_NN_DataType dataType, void LayerNormBuilderTest::SaveParamAxisTensor(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) { - int32_t* 
beginNormParamValue = new (std::nothrow) int32_t(1); + int64_t* beginNormParamValue = new (std::nothrow) int64_t(1); EXPECT_NE(nullptr, beginNormParamValue); std::shared_ptr paramAxisTensor = TransToNNTensor(dataType, dim, quantParam, type); - paramAxisTensor->SetBuffer(beginNormParamValue, sizeof(int32_t)); + paramAxisTensor->SetBuffer(beginNormParamValue, sizeof(int64_t)); m_allTensors.emplace_back(paramAxisTensor); } @@ -106,9 +106,9 @@ HWTEST_F(LayerNormBuilderTest, layernorm_build_001, TestSize.Level0) { SetInputTensor(m_inputTensor); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); - SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveNormAixsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); - SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + SaveParamAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_SUCCESS, ret); @@ -123,9 +123,9 @@ HWTEST_F(LayerNormBuilderTest, layernorm_build_002, TestSize.Level0) { SetInputTensor(m_inputTensor); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); - SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveNormAixsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); - SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + SaveParamAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); EXPECT_EQ(OH_NN_SUCCESS, m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors)); OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); 
@@ -145,9 +145,9 @@ HWTEST_F(LayerNormBuilderTest, layernorm_build_003, TestSize.Level0) SetInputTensor(m_inputTensor); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); - SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveNormAixsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); - SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + SaveParamAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -165,9 +165,9 @@ HWTEST_F(LayerNormBuilderTest, layernorm_build_004, TestSize.Level0) SetInputTensor(m_inputTensor); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); - SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveNormAixsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); - SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + SaveParamAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -214,7 +214,7 @@ HWTEST_F(LayerNormBuilderTest, layernorm_build_007, TestSize.Level0) m_allTensors.emplace_back(normAxisTensor); SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); - SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + SaveParamAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, 
m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -234,13 +234,13 @@ HWTEST_F(LayerNormBuilderTest, layernorm_build_008, TestSize.Level0) SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); std::shared_ptr normAxisTensor; - normAxisTensor = TransToNNTensor(OH_NN_INT32, expectParamDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); - int32_t beginNormAxisValue[2] = {1, 2}; - normAxisTensor->SetBuffer(beginNormAxisValue, 2 * sizeof(int32_t)); + normAxisTensor = TransToNNTensor(OH_NN_INT64, expectParamDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + int64_t beginNormAxisValue[2] = {1, 2}; + normAxisTensor->SetBuffer(beginNormAxisValue, 2 * sizeof(int64_t)); m_allTensors.emplace_back(normAxisTensor); SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); - SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + SaveParamAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -256,8 +256,8 @@ HWTEST_F(LayerNormBuilderTest, layernorm_build_009, TestSize.Level0) { SetInputTensor(m_inputTensor); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); - SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); - SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + SaveNormAixsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveParamAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); std::shared_ptr epsilonTensor; epsilonTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); @@ -281,8 +281,8 @@ HWTEST_F(LayerNormBuilderTest, layernorm_build_010, TestSize.Level0) SetInputTensor(m_inputTensor); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, 
nullptr); - SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); - SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + SaveNormAixsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveParamAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); std::shared_ptr epsilonTensor; epsilonTensor = TransToNNTensor(OH_NN_FLOAT32, expectParamDim, nullptr, OH_NN_LAYER_NORM_EPSILON); @@ -304,7 +304,7 @@ HWTEST_F(LayerNormBuilderTest, layernorm_build_011, TestSize.Level0) { SetInputTensor(m_inputTensor); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); - SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveNormAixsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); std::shared_ptr paramAxisTensor; @@ -329,13 +329,13 @@ HWTEST_F(LayerNormBuilderTest, layernorm_build_012, TestSize.Level0) SetInputTensor(m_inputTensor); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); - SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveNormAixsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); std::shared_ptr paramAxisTensor; - paramAxisTensor = TransToNNTensor(OH_NN_INT32, expectParamDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); - int32_t beginNormParamValue[2] = {1, 1}; - paramAxisTensor->SetBuffer(beginNormParamValue, 2 * sizeof(int32_t)); + paramAxisTensor = TransToNNTensor(OH_NN_INT64, expectParamDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + int64_t beginNormParamValue[2] = {1, 1}; + paramAxisTensor->SetBuffer(beginNormParamValue, 2 * sizeof(int64_t)); m_allTensors.emplace_back(paramAxisTensor); OH_NN_ReturnCode ret = 
m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); @@ -352,9 +352,9 @@ HWTEST_F(LayerNormBuilderTest, layernorm_build_0013, TestSize.Level0) { SetInputTensor(m_inputTensor); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); - SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_BATCH_NORM_EPSILON); + SaveNormAixsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_BATCH_NORM_EPSILON); SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); - SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + SaveParamAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -370,10 +370,10 @@ HWTEST_F(LayerNormBuilderTest, layernorm_build_014, TestSize.Level0) SetInputTensor(m_inputTensor); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); - SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + SaveParamAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); std::shared_ptr normAxisTensor; - normAxisTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + normAxisTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); m_allTensors.emplace_back(normAxisTensor); OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); @@ -389,8 +389,8 @@ HWTEST_F(LayerNormBuilderTest, layernorm_build_015, TestSize.Level0) { SetInputTensor(m_inputTensor); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); - SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); - SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, 
OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + SaveNormAixsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveParamAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); std::shared_ptr epsilonTensor; epsilonTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); @@ -409,11 +409,11 @@ HWTEST_F(LayerNormBuilderTest, layernorm_build_016, TestSize.Level0) { SetInputTensor(m_inputTensor); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); - SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveNormAixsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); std::shared_ptr paramAxisTensor; - paramAxisTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + paramAxisTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); m_allTensors.emplace_back(paramAxisTensor); OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); @@ -429,13 +429,13 @@ HWTEST_F(LayerNormBuilderTest, layernorm_getprimitive_001, TestSize.Level0) { SetInputTensor(m_inputTensor); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); - SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveNormAixsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); - SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + SaveParamAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); - int32_t beginNormAxisValue = 1; + int64_t beginNormAxisValue = 1; float epsilonValue = 0.0f; - int32_t beginNormParamValue = 1; + int64_t beginNormParamValue = 1; 
EXPECT_EQ(OH_NN_SUCCESS, m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors)); LiteGraphPrimitvePtr primitive = m_layerNorm.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); diff --git a/test/unittest/ops/log_softmax_test.cpp b/test/unittest/ops/log_softmax_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..925f0ab7b456aab32eb45dfa7b4462a92b1363b4 --- /dev/null +++ b/test/unittest/ops/log_softmax_test.cpp @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/log_softmax_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class LogSoftmaxBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + LogSoftmaxBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_params {2}; + std::vector m_dim {2, 2}; + std::vector m_paramDim {}; +}; + +void LogSoftmaxBuilderTest::SetUp() {} + +void LogSoftmaxBuilderTest::TearDown() {} + +void LogSoftmaxBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr axisTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t[1] {0}; + EXPECT_NE(nullptr, axisValue); + axisTensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); +} + +/** + * @tc.name: log_softmax_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxBuilderTest, log_softmax_build_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LOG_SOFTMAX_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: log_softmax_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
+ * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxBuilderTest, log_softmax_build_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LOG_SOFTMAX_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: log_softmax_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxBuilderTest, log_softmax_build_003, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LOG_SOFTMAX_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: log_softmax_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxBuilderTest, log_softmax_build_004, TestSize.Level1) +{ + m_outputs = {1, 2}; + m_params = {3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LOG_SOFTMAX_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: log_softmax_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxBuilderTest, log_softmax_build_005, TestSize.Level1) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: log_softmax_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxBuilderTest, log_softmax_build_006, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: log_softmax_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid keep_dims's dataType. + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxBuilderTest, log_softmax_build_007, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_LOG_SOFTMAX_AXIS); + float* axisValue = new (std::nothrow) float[1] {0.0f}; + axisTensor->SetBuffer(axisValue, sizeof(float)); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + axisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: log_softmax_build_008 + * @tc.desc: Verify that the build function returns a failed message with passing invalid keep_dims param. 
+ * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxBuilderTest, log_softmax_build_008, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: log_softmax_build_009 + * @tc.desc: Verify that the build function returns a failed message without set buffer for keep_dims. + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxBuilderTest, log_softmax_build_009, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_LOG_SOFTMAX_AXIS); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: log_softmax_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxBuilderTest, log_softmax_getprimitive_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LOG_SOFTMAX_AXIS); + + int64_t axisValue = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_LogSoftmax_GetAxis(primitive.get()); + EXPECT_EQ(returnValue, axisValue); +} + +/** + * @tc.name: 
log_softmax_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxBuilderTest, log_softmax_getprimitive_002, TestSize.Level1) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/lrn_test.cpp b/test/unittest/ops/lrn_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3b5804d6af101c7fcd92b218f45baab2d1246bcf --- /dev/null +++ b/test/unittest/ops/lrn_test.cpp @@ -0,0 +1,597 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/lrn_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class LRNBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveDepthRadius(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveAlpha(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveBeta(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveBias(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveNormRegion(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + LRNBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_params {2, 3, 4, 5, 6}; + std::vector m_inputDim {1, 3, 2, 2}; + std::vector m_outputDim {1, 3, 2, 2}; + std::vector m_paramDim {}; +}; + +void LRNBuilderTest::SetUp() {} + +void LRNBuilderTest::TearDown() {} + +void LRNBuilderTest::SaveDepthRadius(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr depthRadiusTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* depthRadiusValue = new (std::nothrow) int64_t[1] {1}; + EXPECT_NE(nullptr, depthRadiusValue); + depthRadiusTensor->SetBuffer(depthRadiusValue, sizeof(int64_t)); + m_allTensors.emplace_back(depthRadiusTensor); +} + +void LRNBuilderTest::SaveAlpha(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr alphaTensor = 
TransToNNTensor(dataType, dim, quantParam, type); + float* alphaValue = new (std::nothrow) float[1] {0.0001}; + EXPECT_NE(nullptr, alphaValue); + alphaTensor->SetBuffer(alphaValue, sizeof(float)); + m_allTensors.emplace_back(alphaTensor); +} + +void LRNBuilderTest::SaveBeta(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr betaTensor = TransToNNTensor(dataType, dim, quantParam, type); + float* betaValue = new (std::nothrow) float[1] {0.75}; + EXPECT_NE(nullptr, betaValue); + betaTensor->SetBuffer(betaValue, sizeof(float)); + m_allTensors.emplace_back(betaTensor); +} + +void LRNBuilderTest::SaveBias(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr biasTensor = TransToNNTensor(dataType, dim, quantParam, type); + float* biasValue = new (std::nothrow) float[1] {2.0f}; + EXPECT_NE(nullptr, biasValue); + biasTensor->SetBuffer(biasValue, sizeof(float)); + m_allTensors.emplace_back(biasTensor); +} + +void LRNBuilderTest::SaveNormRegion(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr normRegionTensor = TransToNNTensor(dataType, dim, quantParam, type); + int32_t* normRegionValue = new (std::nothrow) int32_t[1] {0}; + EXPECT_NE(nullptr, normRegionValue); + normRegionTensor->SetBuffer(normRegionValue, sizeof(int32_t)); + m_allTensors.emplace_back(normRegionTensor); +} + +/** + * @tc.name: lrn_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LRN_DEPTH_RADIUS); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_ALPHA); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BETA); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BIAS); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LRN_NORM_REGION); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: lrn_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LRN_DEPTH_RADIUS); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_ALPHA); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BETA); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BIAS); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LRN_NORM_REGION); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: lrn_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. 
+ * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_003, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + m_params = {3, 4, 5, 6, 7}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LRN_DEPTH_RADIUS); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_ALPHA); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BETA); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BIAS); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LRN_NORM_REGION); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lrn_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_004, TestSize.Level1) +{ + m_outputs = {1, 2}; + m_params = {3, 4, 5, 6, 7}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LRN_DEPTH_RADIUS); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_ALPHA); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BETA); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BIAS); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LRN_NORM_REGION); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lrn_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_005, TestSize.Level1) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lrn_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_006, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lrn_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid depthRadius's dataType. + * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_007, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr depthRadiusTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_LRN_DEPTH_RADIUS); + float* depthRadiusValue = new (std::nothrow) float[1] {1.5}; + depthRadiusTensor->SetBuffer(depthRadiusValue, sizeof(float)); + m_allTensors.emplace_back(depthRadiusTensor); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_ALPHA); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BETA); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BIAS); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LRN_NORM_REGION); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + depthRadiusTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: lrn_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid alpha's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_008, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LRN_DEPTH_RADIUS); + std::shared_ptr alphaTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_LRN_ALPHA); + int64_t* alphaValue = new (std::nothrow) int64_t[1] {0}; + alphaTensor->SetBuffer(alphaValue, sizeof(int64_t)); + m_allTensors.emplace_back(alphaTensor); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BETA); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BIAS); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LRN_NORM_REGION); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + alphaTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: lrn_build_009 + * @tc.desc: Verify that the build function returns a failed message with invalid beta's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_009, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LRN_DEPTH_RADIUS); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_ALPHA); + std::shared_ptr betaTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_LRN_BETA); + int64_t* betaValue = new (std::nothrow) int64_t[1] {1}; + betaTensor->SetBuffer(betaValue, sizeof(int64_t)); + m_allTensors.emplace_back(betaTensor); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BIAS); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LRN_NORM_REGION); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + betaTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: lrn_build_010 + * @tc.desc: Verify that the build function returns a failed message with invalid bias's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_010, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LRN_DEPTH_RADIUS); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_ALPHA); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BETA); + std::shared_ptr biasTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_LRN_BIAS); + int64_t* biasValue = new (std::nothrow) int64_t[2] {2}; + biasTensor->SetBuffer(biasValue, sizeof(int64_t)); + m_allTensors.emplace_back(biasTensor); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LRN_NORM_REGION); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + biasTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: lrn_build_011 + * @tc.desc: Verify that the build function returns a failed message with invalid normRegion's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_011, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LRN_DEPTH_RADIUS); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_ALPHA); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BETA); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BIAS); + std::shared_ptr normRegionTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_LRN_NORM_REGION); + int64_t* normRegionValue = new (std::nothrow) int64_t[1] {0}; + normRegionTensor->SetBuffer(normRegionValue, sizeof(int64_t)); + m_allTensors.emplace_back(normRegionTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + normRegionTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: lrn_build_012 + * @tc.desc: Verify that the build function returns a failed message with passing invalid depthRadius. + * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_012, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_ALPHA); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BETA); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BIAS); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LRN_NORM_REGION); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lrn_build_013 + * @tc.desc: Verify that the build function returns a failed message with passing invalid alpha. 
+ * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_013, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LRN_DEPTH_RADIUS); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BETA); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BIAS); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LRN_NORM_REGION); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lrn_build_014 + * @tc.desc: Verify that the build function returns a failed message with passing invalid beta. + * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_014, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LRN_DEPTH_RADIUS); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_ALPHA); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BIAS); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LRN_NORM_REGION); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lrn_build_015 + * @tc.desc: Verify that the build function returns a failed message with passing invalid bias. 
+ * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_015, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LRN_DEPTH_RADIUS); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_ALPHA); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BETA); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LRN_NORM_REGION); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lrn_build_016 + * @tc.desc: Verify that the build function returns a failed message with passing invalid normRegion. + * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_016, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LRN_DEPTH_RADIUS); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_ALPHA); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BETA); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BIAS); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lrn_build_017 + * @tc.desc: Verify that the build function returns a failed message without set buffer for depthRadius. 
+ * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_017, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr depthRadiusTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_LRN_DEPTH_RADIUS); + m_allTensors.emplace_back(depthRadiusTensor); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_ALPHA); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BETA); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BIAS); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LRN_NORM_REGION); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lrn_build_018 + * @tc.desc: Verify that the build function returns a failed message without set buffer for alpha. + * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_018, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LRN_DEPTH_RADIUS); + std::shared_ptr alphaTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_LRN_ALPHA); + m_allTensors.emplace_back(alphaTensor); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BETA); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BIAS); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LRN_NORM_REGION); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lrn_build_019 + * @tc.desc: Verify that the build function returns a failed message without set buffer for beta. 
+ * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_019, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LRN_DEPTH_RADIUS); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_ALPHA); + std::shared_ptr betaTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_LRN_BETA); + m_allTensors.emplace_back(betaTensor); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BIAS); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LRN_NORM_REGION); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lrn_build_020 + * @tc.desc: Verify that the build function returns a failed message without set buffer for bias. + * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_020, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LRN_DEPTH_RADIUS); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_ALPHA); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BETA); + std::shared_ptr biasTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_LRN_BIAS); + m_allTensors.emplace_back(biasTensor); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LRN_NORM_REGION); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lrn_build_021 + * @tc.desc: Verify that the build function returns a failed message without set buffer for normRegion. 
+ * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_build_021, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LRN_DEPTH_RADIUS); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_ALPHA); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BETA); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BIAS); + std::shared_ptr normRegionTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, + nullptr, OH_NN_LRN_NORM_REGION); + m_allTensors.emplace_back(normRegionTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lrn_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_getprimitive_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveDepthRadius(OH_NN_INT64, m_paramDim, nullptr, OH_NN_LRN_DEPTH_RADIUS); + SaveAlpha(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_ALPHA); + SaveBeta(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BETA); + SaveBias(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LRN_BIAS); + SaveNormRegion(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LRN_NORM_REGION); + + int64_t depthRadiusValue {1}; + float alphaValue {0.0001}; + float betaValue {0.75}; + float biasValue {2.0f}; + std::string normRegionValue = "ACROSS_CHANNELS"; + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnDepthRadius = 
mindspore::lite::MindIR_LRN_GetDepthRadius(primitive.get()); + EXPECT_EQ(returnDepthRadius, depthRadiusValue); + auto returnAlpha = mindspore::lite::MindIR_LRN_GetAlpha(primitive.get()); + EXPECT_EQ(returnAlpha, alphaValue); + auto returnBeta = mindspore::lite::MindIR_LRN_GetBeta(primitive.get()); + EXPECT_EQ(returnBeta, betaValue); + auto returnBias = mindspore::lite::MindIR_LRN_GetBias(primitive.get()); + EXPECT_EQ(returnBias, biasValue); + auto returnNormRegion = mindspore::lite::MindIR_LRN_GetNormRegion(primitive.get()); + EXPECT_EQ(returnNormRegion, normRegionValue); +} + +/** + * @tc.name: lrn_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(LRNBuilderTest, lrn_getprimitive_002, TestSize.Level1) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/maxpool_pad_test.cpp b/test/unittest/ops/maxpool_pad_test.cpp index 92e67beaad497ddd97929cf28219b2bb3ee941f5..6228084f25e3375d36f4f7cd639e55bc70467033 100644 --- a/test/unittest/ops/maxpool_pad_test.cpp +++ b/test/unittest/ops/maxpool_pad_test.cpp @@ -31,13 +31,17 @@ public: void SetPad(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetRoundMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetGlobal(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); void SetPadParam(); public: MaxPoolBuilder m_builder; std::vector m_inputs{0}; std::vector m_outputs{1}; - std::vector m_params{2, 3, 4, 5}; + std::vector m_params{2, 3, 4, 5, 6, 7}; std::vector m_input_dim{1, 3, 3, 1}; std::vector m_output_dim{1, 2, 2, 1}; std::vector 
m_kenelsize_dim{2}; @@ -50,6 +54,26 @@ void MaxPoolPadBuilderTest::SetUp() {} void MaxPoolPadBuilderTest::TearDown() {} +void MaxPoolPadBuilderTest::SetRoundMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int32_t* roundModeValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, roundModeValue); + tensor->SetBuffer(roundModeValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); +} + +void MaxPoolPadBuilderTest::SetGlobal(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + bool* globalValue = new (std::nothrow) bool(false); + EXPECT_NE(nullptr, globalValue); + tensor->SetBuffer(globalValue, sizeof(bool)); + m_allTensors.emplace_back(tensor); +} + void MaxPoolPadBuilderTest::SetPad(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) { @@ -68,6 +92,8 @@ void MaxPoolPadBuilderTest::SetPadParam() SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL); } /** @@ -110,7 +136,7 @@ HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_003, TestSize.Level1) { m_inputs = {}; m_outputs = {0}; - m_params = {1, 2, 3, 4}; + m_params = {1, 2, 3, 4, 5, 6}; m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); @@ -128,7 +154,7 @@ HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_004, TestSize.Level1) { m_inputs = {0}; m_outputs = {}; - 
m_params = {1, 2, 3, 4}; + m_params = {1, 2, 3, 4, 5, 6}; m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); @@ -144,9 +170,9 @@ HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_004, TestSize.Level1) */ HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_005, TestSize.Level1) { - m_inputs = {6}; + m_inputs = {8}; m_outputs = {1}; - m_params = {2, 3, 4, 5}; + m_params = {2, 3, 4, 5, 6, 7}; m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); @@ -163,8 +189,8 @@ HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_005, TestSize.Level1) HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_006, TestSize.Level1) { m_inputs = {0}; - m_outputs = {6}; - m_params = {2, 3, 4, 5}; + m_outputs = {8}; + m_params = {2, 3, 4, 5, 6, 7}; m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); @@ -195,6 +221,8 @@ HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_007, TestSize.Level1) SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL); m_paramsIndex = m_params; EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -220,6 +248,8 @@ HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_008, TestSize.Level1) m_allTensors.emplace_back(tensor); SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, 
nullptr, OH_NN_MAX_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -244,6 +274,8 @@ HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_009, TestSize.Level1) tensor->SetBuffer(padValue, sizeof(int32_t) * padNum); m_allTensors.emplace_back(tensor); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -269,15 +301,71 @@ HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_010, TestSize.Level1) tensor->SetBuffer(activationValue, sizeof(int32_t)); m_allTensors.emplace_back(tensor); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } /** * @tc.name: maxpool_build_pad_011 - * @tc.desc: Verify the activation scalar length of the build function + * @tc.desc: Verify the invalid roundMode of the build function * @tc.type: FUNC */ HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_011, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, 
nullptr, + OH_NN_MAX_POOL_ROUND_MODE); + int64_t* roundModeValue = new (std::nothrow) int64_t(0); + EXPECT_NE(nullptr, roundModeValue); + + tensor->SetBuffer(roundModeValue, sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + tensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: maxpool_build_pad_012 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_012, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_MAX_POOL_GLOBAL); + int32_t* globalValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, globalValue); + + tensor->SetBuffer(globalValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + tensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: maxpool_build_pad_013 + * @tc.desc: Verify the activation scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_013, TestSize.Level1) { m_param_dim = {2}; m_paramsIndex = m_params; @@ -298,11 +386,11 @@ HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_011, TestSize.Level1) } /** 
- * @tc.name: maxpool_build_pad_012 + * @tc.name: maxpool_build_pad_014 * @tc.desc: Verify the maxpool without set kernelsize of the build function * @tc.type: FUNC */ -HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_012, TestSize.Level1) +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_014, TestSize.Level1) { m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -315,15 +403,17 @@ HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_012, TestSize.Level1) SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } /** - * @tc.name: maxpool_build_pad_013 + * @tc.name: maxpool_build_pad_015 * @tc.desc: Verify the maxpool without set stride of the build function * @tc.type: FUNC */ -HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_013, TestSize.Level1) +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_015, TestSize.Level1) { m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -335,15 +425,17 @@ HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_013, TestSize.Level1) SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } /** - * @tc.name: maxpool_build_pad_014 + * @tc.name: maxpool_build_pad_016 * @tc.desc: Verify the maxpool without set 
pad of the build function * @tc.type: FUNC */ -HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_014, TestSize.Level1) +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_016, TestSize.Level1) { m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -355,15 +447,17 @@ HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_014, TestSize.Level1) m_allTensors.emplace_back(tensor); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } /** - * @tc.name: maxpool_build_pad_015 + * @tc.name: maxpool_build_pad_017 * @tc.desc: Verify the maxpool without set activation of the build function * @tc.type: FUNC */ -HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_015, TestSize.Level1) +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_017, TestSize.Level1) { m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -376,6 +470,54 @@ HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_015, TestSize.Level1) OH_NN_MAX_POOL_ACTIVATION_TYPE); m_allTensors.emplace_back(tensor); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_018 + * @tc.desc: Verify the avgpool without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_018, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + 
SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_MAX_POOL_ROUND_MODE); + m_allTensors.emplace_back(tensor); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_019 + * @tc.desc: Verify the avgpool without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_019, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_MAX_POOL_GLOBAL); + m_allTensors.emplace_back(tensor); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -409,6 +551,12 @@ HWTEST_F(MaxPoolPadBuilderTest, maxpool_getprimitive_pad_001, TestSize.Level1) int8_t activationValue = 0; int expectActivation = mindspore::lite::MindIR_MaxPoolFusion_GetActivationType(primitive.get()); EXPECT_EQ(activationValue, expectActivation); + mindspore::lite::RoundMode roundModeValue = 
mindspore::lite::ROUND_MODE_FLOOR; + auto expectRoundMode = mindspore::lite::MindIR_MaxPoolFusion_GetRoundMode(primitive.get()); + EXPECT_EQ(roundModeValue, expectRoundMode); + bool globalValue = false; + bool expectGlobal = mindspore::lite::MindIR_MaxPoolFusion_GetGlobal(primitive.get()); + EXPECT_EQ(globalValue, expectGlobal); } /** diff --git a/test/unittest/ops/maxpool_padmode_test.cpp b/test/unittest/ops/maxpool_padmode_test.cpp index 34f0ae0a8f00851b21012be8cff714c862ac8bc0..d6c1b79c3cc5eded4fa3b43222f57275fd9c2890 100644 --- a/test/unittest/ops/maxpool_padmode_test.cpp +++ b/test/unittest/ops/maxpool_padmode_test.cpp @@ -31,13 +31,17 @@ public: void SetPadMode(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetRoundMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetGlobal(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); void SetParam(); public: MaxPoolBuilder m_builder; std::vector m_inputs{0}; std::vector m_outputs{1}; - std::vector m_params{2, 3, 4, 5}; + std::vector m_params{2, 3, 4, 5, 6, 7}; std::vector m_input_dim{1, 3, 3, 1}; std::vector m_output_dim{1, 2, 2, 1}; std::vector m_kenelsize_dim{2}; @@ -49,6 +53,26 @@ void MaxPoolBuilderTest::SetUp() {} void MaxPoolBuilderTest::TearDown() {} +void MaxPoolBuilderTest::SetRoundMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int32_t* roundModeValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, roundModeValue); + tensor->SetBuffer(roundModeValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); +} + +void MaxPoolBuilderTest::SetGlobal(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + 
std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + bool* globalValue = new (std::nothrow) bool(false); + EXPECT_NE(nullptr, globalValue); + tensor->SetBuffer(globalValue, sizeof(bool)); + m_allTensors.emplace_back(tensor); +} + void MaxPoolBuilderTest::SetPadMode(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) { @@ -65,6 +89,8 @@ void MaxPoolBuilderTest::SetParam() SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL); } /** @@ -105,7 +131,7 @@ HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_003, TestSize.Level1) { m_inputs = {}; m_outputs = {0}; - m_params = {1, 2, 3, 4}; + m_params = {1, 2, 3, 4, 5, 6}; m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -123,7 +149,7 @@ HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_004, TestSize.Level1) { m_inputs = {0}; m_outputs = {}; - m_params = {1, 2, 3, 4}; + m_params = {1, 2, 3, 4, 5, 6}; m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -139,9 +165,9 @@ HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_004, TestSize.Level1) */ HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_005, TestSize.Level1) { - m_inputs = {6}; + m_inputs = {8}; m_outputs = {1}; - m_params = {2, 3, 4, 5}; + m_params = {2, 3, 4, 5, 6, 7}; m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -158,8 +184,8 @@ HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_005, TestSize.Level1) HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_006, TestSize.Level1) { m_inputs = {0}; - m_outputs = {6}; - m_params = {2, 3, 4, 5}; + 
m_outputs = {8}; + m_params = {2, 3, 4, 5, 6, 7}; m_paramsIndex = m_params; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); @@ -190,6 +216,8 @@ HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_007, TestSize.Level1) SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -214,6 +242,8 @@ HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_008, TestSize.Level1) m_allTensors.emplace_back(tensor); SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -237,6 +267,8 @@ HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_009, TestSize.Level1) tensor->SetBuffer(padValueTest, sizeof(int32_t)); m_allTensors.emplace_back(tensor); SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } @@ -262,15 +294,71 @@ HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_010, TestSize.Level1) tensor->SetBuffer(activationValue, sizeof(int32_t)); m_allTensors.emplace_back(tensor); + SetRoundMode(OH_NN_INT32, 
m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL); EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } /** * @tc.name: maxpool_build_pad_mode_011 - * @tc.desc: Verify the scalar length of the build function + * @tc.desc: Verify the invalid roundMode of the build function * @tc.type: FUNC */ HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_011, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, + OH_NN_MAX_POOL_ROUND_MODE); + int64_t* roundModeValue = new (std::nothrow) int64_t(0); + EXPECT_NE(nullptr, roundModeValue); + + tensor->SetBuffer(roundModeValue, sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + tensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: maxpool_build_pad_mode_012 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_012, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, 
m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_MAX_POOL_GLOBAL); + int32_t* globalValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, globalValue); + + tensor->SetBuffer(globalValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + tensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: maxpool_build_pad_mode_013 + * @tc.desc: Verify the scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_013, TestSize.Level1) { m_param_dim = {2}; m_paramsIndex = m_params; @@ -290,6 +378,79 @@ HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_011, TestSize.Level1) EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); } +/** + * @tc.name: maxpool_build_pad_mode_014 + * @tc.desc: Verify the param invalid to avgpool of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_014, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + 
OH_NN_DIV_ACTIVATIONTYPE); + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_mode_015 + * @tc.desc: Verify the invalid padmode of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_015, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + int8_t *padValueTest = new (std::nothrow) int8_t(6); + EXPECT_NE(nullptr, padValueTest); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE); + tensor->SetBuffer(padValueTest, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_mode_016 + * @tc.desc: Verify the invalid activation value of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_016, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE); + + int8_t* activationValue = new (std::nothrow) int8_t(6); + EXPECT_NE(nullptr, activationValue); + std::shared_ptr tensor = 
TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_MAX_POOL_ACTIVATION_TYPE); + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + /** * @tc.name: maxpool_getprimitive_pad_mode_001 * @tc.desc: Verify the behavior of the GetPrimitive function @@ -315,9 +476,15 @@ HWTEST_F(MaxPoolBuilderTest, maxpool_getprimitive_pad_mode_001, TestSize.Level1) std::vector strideValueTest{1, 1}; int returnPadMode = mindspore::lite::MindIR_MaxPoolFusion_GetPadMode(primitive.get()); EXPECT_EQ(1, returnPadMode); - int returnActivation = mindspore::lite::MindIR_MaxPoolFusion_GetActivationType(primitive.get()); EXPECT_EQ(0, returnActivation); + + mindspore::lite::RoundMode roundModeValue = mindspore::lite::ROUND_MODE_FLOOR; + auto expectRoundMode = mindspore::lite::MindIR_MaxPoolFusion_GetRoundMode(primitive.get()); + EXPECT_EQ(roundModeValue, expectRoundMode); + bool globalValue = false; + bool expectGlobal = mindspore::lite::MindIR_MaxPoolFusion_GetGlobal(primitive.get()); + EXPECT_EQ(globalValue, expectGlobal); } /** diff --git a/test/unittest/ops/minimum_test.cpp b/test/unittest/ops/minimum_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..34fdaa20b274e6a68add6418080d2f6886cf3ec5 --- /dev/null +++ b/test/unittest/ops/minimum_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/minimum_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class MinimumBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + MinimumBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void MinimumBuilderTest::SetUp() {} + +void MinimumBuilderTest::TearDown() {} + +/** + * @tc.name: minimum_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(MinimumBuilderTest, minimum_build_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: minimum_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
+ * @tc.type: FUNC + */ +HWTEST_F(MinimumBuilderTest, minimum_build_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: minimum_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(MinimumBuilderTest, minimum_build_003, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: minimum_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(MinimumBuilderTest, minimum_build_004, TestSize.Level1) +{ + m_outputs = {2, 3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: minimum_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(MinimumBuilderTest, minimum_build_005, TestSize.Level1) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: minimum_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(MinimumBuilderTest, minimum_build_006, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: minimum_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(MinimumBuilderTest, minimum_getprimitive_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: minimum_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(MinimumBuilderTest, minimum_getprimitive_002, TestSize.Level1) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/pad_builder_test.cpp b/test/unittest/ops/pad_builder_test.cpp index 2e3f8474adb35b430b3e8baa7752f9b089875c50..ca68df75777b206f6307f21e8c9541c6edeadb6c 100644 --- a/test/unittest/ops/pad_builder_test.cpp +++ b/test/unittest/ops/pad_builder_test.cpp @@ -30,14 +30,16 @@ public: void TearDown() override; protected: - void SaveParamsTensor(OH_NN_DataType dataType, + void SetConstValueTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetPaddingModeTensor(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); protected: PadBuilder m_pad; std::vector m_inputs {0, 1}; std::vector m_outputs {2}; - std::vector m_params {3}; + std::vector m_params {3, 4}; std::vector m_inputDim {1, 1, 2, 3}; std::vector m_outputDim {1, 2, 7, 7}; std::vector m_paramDim {}; @@ -47,7 +49,7 @@ void PadBuilderTest::SetUp() {} void PadBuilderTest::TearDown() {} -void PadBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, +void PadBuilderTest::SetConstValueTensor(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) { std::shared_ptr constantValueTensor = TransToNNTensor(dataType, dim, quantParam, type); @@ -57,6 +59,16 @@ void PadBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, m_allTensors.emplace_back(constantValueTensor); } +void PadBuilderTest::SetPaddingModeTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr paddingModeValueTensor = TransToNNTensor(dataType, dim, quantParam, 
type); + int32_t* paddingModeValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, paddingModeValue); + paddingModeValueTensor->SetBuffer(paddingModeValue, sizeof(int32_t)); + m_allTensors.emplace_back(paddingModeValueTensor); +} + /** * @tc.name: pad_build_001 * @tc.desc: Verify that the build function returns a successful message. @@ -66,7 +78,8 @@ HWTEST_F(PadBuilderTest, pad_build_001, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE); OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_SUCCESS, ret); @@ -81,7 +94,8 @@ HWTEST_F(PadBuilderTest, pad_build_002, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE); EXPECT_EQ(OH_NN_SUCCESS, m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); @@ -97,11 +111,12 @@ HWTEST_F(PadBuilderTest, pad_build_003, TestSize.Level0) { m_inputs = {0, 1, 2}; m_outputs = {3}; - m_params = {4}; + m_params = {4, 5}; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, 
OH_NN_PAD_CONSTANT_VALUE); + SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE); OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -115,11 +130,12 @@ HWTEST_F(PadBuilderTest, pad_build_003, TestSize.Level0) HWTEST_F(PadBuilderTest, pad_build_004, TestSize.Level0) { m_outputs = {2, 3}; - m_params = {4}; + m_params = {4, 5}; SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE); OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -163,6 +179,7 @@ HWTEST_F(PadBuilderTest, pad_build_007, TestSize.Level0) int32_t constantValue = 0; constantValueTensor->SetBuffer(&constantValue, sizeof(constantValue)); m_allTensors.emplace_back(constantValueTensor); + SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE); OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -171,10 +188,32 @@ HWTEST_F(PadBuilderTest, pad_build_007, TestSize.Level0) /** * @tc.name: pad_build_008 - * @tc.desc: Verify that the build function returns a failed message with invalid constant's dimension. + * @tc.desc: Verify that the build function returns a failed message with invalid paddingMode's dataType. 
* @tc.type: FUNC */ HWTEST_F(PadBuilderTest, pad_build_008, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + std::shared_ptr paddingModeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_PAD_PADDING_MODE); + int64_t paddingMode = 0; + paddingModeTensor->SetBuffer(&paddingMode, sizeof(int64_t)); + m_allTensors.emplace_back(paddingModeTensor); + + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + paddingModeTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: pad_build_009 + * @tc.desc: Verify that the build function returns a failed message with invalid constant's dimension. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_009, TestSize.Level0) { m_paramDim = {2}; @@ -186,38 +225,76 @@ HWTEST_F(PadBuilderTest, pad_build_008, TestSize.Level0) float constantValue[2] = {2.0, 2.0}; constantValueTensor->SetBuffer(constantValue, 2 * sizeof(float)); m_allTensors.emplace_back(constantValueTensor); + SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE); + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); constantValueTensor->SetBuffer(nullptr, 0); } /** - * @tc.name: pad_build_009 - * @tc.desc: Verify that the build function returns a failed message with passing invalid param. + * @tc.name: pad_build_010 + * @tc.desc: Verify that the build function returns a failed message with passing invalid constvalue. 
* @tc.type: FUNC */ -HWTEST_F(PadBuilderTest, pad_build_009, TestSize.Level0) +HWTEST_F(PadBuilderTest, pad_build_010, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_ONE_HOT_AXIS); + SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_ONE_HOT_AXIS); + SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE); OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } /** - * @tc.name: pad_build_010 + * @tc.name: pad_build_011 + * @tc.desc: Verify that the build function returns a failed message with passing invalid paddingMode. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_011, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_ONE_HOT_AXIS); + + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pad_build_012 * @tc.desc: Verify that the build function returns a failed message without set buffer for constantValue. 
* @tc.type: FUNC */ -HWTEST_F(PadBuilderTest, pad_build_010, TestSize.Level0) +HWTEST_F(PadBuilderTest, pad_build_012, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); std::shared_ptr constantValueTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); m_allTensors.emplace_back(constantValueTensor); + SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE); + + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pad_build_013 + * @tc.desc: Verify that the build function returns a failed message without set buffer for paddingMode. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_013, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + std::shared_ptr paddingModeTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, + nullptr, OH_NN_PAD_PADDING_MODE); + m_allTensors.emplace_back(paddingModeTensor); OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -232,9 +309,11 @@ HWTEST_F(PadBuilderTest, pad_getprimitive_001, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE); float constantValue = 2.0; + mindspore::lite::PaddingMode paddingModeValue = mindspore::lite::PADDING_MODE_CONSTANT; 
EXPECT_EQ(OH_NN_SUCCESS, m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); LiteGraphPrimitvePtr primitive = m_pad.GetPrimitive(); LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); @@ -242,6 +321,8 @@ HWTEST_F(PadBuilderTest, pad_getprimitive_001, TestSize.Level0) auto returnValue = mindspore::lite::MindIR_PadFusion_GetConstantValue(primitive.get()); EXPECT_EQ(returnValue, constantValue); + auto returnPaddingMode = mindspore::lite::MindIR_PadFusion_GetPaddingMode(primitive.get()); + EXPECT_EQ(returnPaddingMode, paddingModeValue); } /** diff --git a/test/unittest/ops/quant_dtype_cast_builder_test.cpp b/test/unittest/ops/quant_dtype_cast_builder_test.cpp index 55c71b836c179d6e12e4d3956baa093d07b06c45..9a046727c9c847572e0c5e6d181931126af883a5 100644 --- a/test/unittest/ops/quant_dtype_cast_builder_test.cpp +++ b/test/unittest/ops/quant_dtype_cast_builder_test.cpp @@ -34,12 +34,14 @@ protected: const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); void SaveDstTensor(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); protected: QuantDTypeCastBuilder m_builder; std::vector m_inputs {0}; std::vector m_outputs {1}; - std::vector m_params {2, 3}; + std::vector m_params {2, 3, 4}; std::vector m_dim {3, 3}; std::vector m_paramDim {}; }; @@ -68,6 +70,16 @@ void QuantDTypeCastBuilderTest::SaveDstTensor(OH_NN_DataType dataType, const std m_allTensors.emplace_back(dstTensor); } +void QuantDTypeCastBuilderTest::SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr axisTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t *axisValue = new (std::nothrow) int64_t(1); + EXPECT_NE(nullptr, axisValue); + axisTensor->SetBuffer(axisValue, 
sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); +} + /** * @tc.name: quantdtypecast_build_001 * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function @@ -79,6 +91,7 @@ HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_001, TestSize.Level0) SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_AXIS); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_SUCCESS, ret); @@ -95,6 +108,7 @@ HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_002, TestSize.Level0) SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_AXIS); EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); @@ -110,12 +124,13 @@ HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_003, TestSize.Level0) { m_inputs = {0, 1}; m_outputs = {2}; - m_params = {3, 4}; + m_params = {3, 4, 5}; SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_AXIS); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ 
-129,12 +144,13 @@ HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_003, TestSize.Level0) HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_004, TestSize.Level0) { m_outputs = {1, 2}; - m_params = {3, 4}; + m_params = {3, 4, 5}; SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_AXIS); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -173,13 +189,14 @@ HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_007, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); - SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); std::shared_ptr srcTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); int32_t srcValue = 1; srcTensor->SetBuffer(&srcValue, sizeof(srcValue)); m_allTensors.emplace_back(srcTensor); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_AXIS); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -195,14 +212,14 @@ HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_008, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); - SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); - + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); std::shared_ptr dstTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, 
OH_NN_QUANT_DTYPE_CAST_DST_T); int32_t dstValue = 1; dstTensor->SetBuffer(&dstValue, sizeof(dstValue)); m_allTensors.emplace_back(dstTensor); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_AXIS); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -211,34 +228,96 @@ HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_008, TestSize.Level0) /** * @tc.name: quantdtypecast_build_009 - * @tc.desc: Verify that the build function return a failed message with invalided parameter + * @tc.desc: Verify that the build function return a failed message with invalided axis's dataType * @tc.type: FUNC */ HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_009, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); - SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, + nullptr, OH_NN_QUANT_DTYPE_CAST_AXIS); + int32_t axisValue = 1; + axisTensor->SetBuffer(&axisValue, sizeof(axisValue)); + m_allTensors.emplace_back(axisTensor); - OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + axisTensor->SetBuffer(nullptr, 0); } /** * @tc.name: quantdtypecast_build_010 - * @tc.desc: Verify that the build function return a failed message with empty src's buffer + * @tc.desc: Verify that the build function return a failed message with invalided src * @tc.type: FUNC */ HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_010, TestSize.Level0) { SaveInputTensor(m_inputs, 
OH_NN_INT8, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: quantdtypecast_build_011 + * @tc.desc: Verify that the build function return a failed message with invalided dst + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_011, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: quantdtypecast_build_012 + * @tc.desc: Verify that the build function return a failed message with invalided axis + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_012, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: 
quantdtypecast_build_013 + * @tc.desc: Verify that the build function return a failed message with empty src's buffer + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_013, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); std::shared_ptr srcTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); m_allTensors.emplace_back(srcTensor); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_AXIS); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -246,26 +325,47 @@ HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_010, TestSize.Level0) } /** - * @tc.name: quantdtypecast_build_011 + * @tc.name: quantdtypecast_build_014 * @tc.desc: Verify that the build function return a failed message with empty dst's buffer * @tc.type: FUNC */ -HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_011, TestSize.Level0) +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_014, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); - SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); - + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); std::shared_ptr dstTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); m_allTensors.emplace_back(dstTensor); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_AXIS); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); dstTensor->SetBuffer(nullptr, 0); } +/** + * @tc.name: quantdtypecast_build_015 + * @tc.desc: Verify that the build function 
return a failed message with empty axis's buffer + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_015, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_QUANT_DTYPE_CAST_AXIS); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + axisTensor->SetBuffer(nullptr, 0); +} + /** * @tc.name: quantdtypecast_get_primitive_001 * @tc.desc: Verify the GetPrimitive function return nullptr @@ -289,6 +389,7 @@ HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_get_primitive_002, TestSize.L SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_AXIS); EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); @@ -297,10 +398,13 @@ HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_get_primitive_002, TestSize.L int64_t srcValue = 1; int64_t dstValue = 1; + int64_t axisValue = 1; auto srcReturn = mindspore::lite::MindIR_QuantDTypeCast_GetSrcT(primitive.get()); EXPECT_EQ(srcReturn, srcValue); auto dstReturn = mindspore::lite::MindIR_QuantDTypeCast_GetDstT(primitive.get()); EXPECT_EQ(dstReturn, dstValue); + auto axisReturn = mindspore::lite::MindIR_QuantDTypeCast_GetAxis(primitive.get()); + EXPECT_EQ(axisReturn, axisValue); } } // namespace UnitTest } // namespace 
NeuralNetworkRuntime diff --git a/test/unittest/ops/range_test.cpp b/test/unittest/ops/range_test.cpp index 68b535f28f41959f4530fc2cec980f1c892d07ce..e53251d57d8f5e88158fa074b428c2e2af301ea4 100644 --- a/test/unittest/ops/range_test.cpp +++ b/test/unittest/ops/range_test.cpp @@ -30,8 +30,6 @@ public: void TearDown() override; protected: - void SaveDType(OH_NN_DataType dataType, - const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); void SaveStart(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); void SaveLimit(OH_NN_DataType dataType, @@ -43,7 +41,7 @@ protected: RangeBuilder m_builder; std::vector m_inputs {0}; std::vector m_outputs {1}; - std::vector m_params {2, 3, 4, 5}; + std::vector m_params {2, 3, 4}; std::vector m_dim {3}; std::vector m_paramDim {}; }; @@ -52,16 +50,6 @@ void RangeBuilderTest::SetUp() {} void RangeBuilderTest::TearDown() {} -void RangeBuilderTest::SaveDType(OH_NN_DataType dataType, - const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) -{ - std::shared_ptr dTypeTensor = TransToNNTensor(dataType, dim, quantParam, type); - int64_t* dTypeValue = new (std::nothrow) int64_t [1]{0}; - EXPECT_NE(nullptr, dTypeValue); - dTypeTensor->SetBuffer(dTypeValue, sizeof(int64_t)); - m_allTensors.emplace_back(dTypeTensor); -} - void RangeBuilderTest::SaveStart(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) { @@ -101,7 +89,6 @@ HWTEST_F(RangeBuilderTest, range_build_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); @@ -119,7 +106,6 @@ 
HWTEST_F(RangeBuilderTest, range_build_002, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); @@ -138,11 +124,10 @@ HWTEST_F(RangeBuilderTest, range_build_003, TestSize.Level1) { m_inputs = {0, 1}; m_outputs = {2}; - m_params = {3, 4, 5, 6}; + m_params = {3, 4, 5}; SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); @@ -159,11 +144,10 @@ HWTEST_F(RangeBuilderTest, range_build_003, TestSize.Level1) HWTEST_F(RangeBuilderTest, range_build_004, TestSize.Level1) { m_outputs = {1, 2}; - m_params = {3, 4, 5, 6}; + m_params = {3, 4, 5}; SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); @@ -198,40 +182,14 @@ HWTEST_F(RangeBuilderTest, range_build_006, TestSize.Level1) /** * @tc.name: range_build_007 - * @tc.desc: Verify that the build function returns a failed message with invalid dType's dataType. 
- * @tc.type: FUNC - */ -HWTEST_F(RangeBuilderTest, range_build_007, TestSize.Level1) -{ - SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); - SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - - std::shared_ptr dTypeTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, - nullptr, OH_NN_RANGE_DTYPE); - float* dTypeValue = new (std::nothrow) float [1]{0.0f}; - EXPECT_NE(nullptr, dTypeValue); - dTypeTensor->SetBuffer(dTypeValue, sizeof(float)); - m_allTensors.emplace_back(dTypeTensor); - SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); - SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); - SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); - - OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); - EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); - dTypeTensor->SetBuffer(nullptr, 0); -} - -/** - * @tc.name: range_build_008 * @tc.desc: Verify that the build function returns a failed message with invalid start's dataType. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_008, TestSize.Level1) +HWTEST_F(RangeBuilderTest, range_build_007, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); std::shared_ptr startTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_RANGE_START); float* startValue = new (std::nothrow) float [1]{0.0f}; @@ -247,16 +205,15 @@ HWTEST_F(RangeBuilderTest, range_build_008, TestSize.Level1) } /** - * @tc.name: range_build_009 + * @tc.name: range_build_008 * @tc.desc: Verify that the build function returns a failed message with invalid limit's dataType. 
* @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_009, TestSize.Level1) +HWTEST_F(RangeBuilderTest, range_build_008, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); std::shared_ptr limitTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); @@ -272,16 +229,15 @@ HWTEST_F(RangeBuilderTest, range_build_009, TestSize.Level1) } /** - * @tc.name: range_build_010 + * @tc.name: range_build_009 * @tc.desc: Verify that the build function returns a failed message with invalid delta's dataType. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_010, TestSize.Level1) +HWTEST_F(RangeBuilderTest, range_build_009, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); std::shared_ptr deltaTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, @@ -297,35 +253,15 @@ HWTEST_F(RangeBuilderTest, range_build_010, TestSize.Level1) } /** - * @tc.name: range_build_011 - * @tc.desc: Verify that the build function returns a failed message with passing invalid dType param. 
- * @tc.type: FUNC - */ -HWTEST_F(RangeBuilderTest, range_build_011, TestSize.Level1) -{ - SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); - SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - - SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); - SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); - SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); - SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); - - OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); - EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); -} - -/** - * @tc.name: range_build_012 + * @tc.name: range_build_010 * @tc.desc: Verify that the build function returns a failed message with passing invalid start param. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_012, TestSize.Level1) +HWTEST_F(RangeBuilderTest, range_build_010, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); @@ -335,16 +271,15 @@ HWTEST_F(RangeBuilderTest, range_build_012, TestSize.Level1) } /** - * @tc.name: range_build_013 + * @tc.name: range_build_011 * @tc.desc: Verify that the build function returns a failed message with passing invalid limit param. 
* @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_013, TestSize.Level1) +HWTEST_F(RangeBuilderTest, range_build_011, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); @@ -354,16 +289,15 @@ HWTEST_F(RangeBuilderTest, range_build_013, TestSize.Level1) } /** - * @tc.name: range_build_014 + * @tc.name: range_build_012 * @tc.desc: Verify that the build function returns a failed message with passing invalid delta param. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_014, TestSize.Level1) +HWTEST_F(RangeBuilderTest, range_build_012, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); @@ -373,37 +307,15 @@ HWTEST_F(RangeBuilderTest, range_build_014, TestSize.Level1) } /** - * @tc.name: range_build_015 - * @tc.desc: Verify that the build function returns a failed message without set buffer for dType. 
- * @tc.type: FUNC - */ -HWTEST_F(RangeBuilderTest, range_build_015, TestSize.Level1) -{ - SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); - SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - - std::shared_ptr dTypeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, - nullptr, OH_NN_RANGE_DTYPE); - m_allTensors.emplace_back(dTypeTensor); - SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); - SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); - SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); - - OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); - EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); -} - -/** - * @tc.name: range_build_016 + * @tc.name: range_build_013 * @tc.desc: Verify that the build function returns a failed message without set buffer for start. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_016, TestSize.Level1) +HWTEST_F(RangeBuilderTest, range_build_013, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); std::shared_ptr startTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); m_allTensors.emplace_back(startTensor); @@ -415,16 +327,15 @@ HWTEST_F(RangeBuilderTest, range_build_016, TestSize.Level1) } /** - * @tc.name: range_build_017 + * @tc.name: range_build_014 * @tc.desc: Verify that the build function returns a failed message without set buffer for limit. 
* @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_017, TestSize.Level1) +HWTEST_F(RangeBuilderTest, range_build_014, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); std::shared_ptr limitTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); @@ -436,16 +347,15 @@ HWTEST_F(RangeBuilderTest, range_build_017, TestSize.Level1) } /** - * @tc.name: range_build_018 + * @tc.name: range_build_015 * @tc.desc: Verify that the build function returns a failed message without set buffer for delta. * @tc.type: FUNC */ -HWTEST_F(RangeBuilderTest, range_build_018, TestSize.Level1) +HWTEST_F(RangeBuilderTest, range_build_015, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); std::shared_ptr deltaTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, @@ -465,12 +375,10 @@ HWTEST_F(RangeBuilderTest, range_getprimitive_001, TestSize.Level1) { SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); - SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); - int64_t dTypeValue = 0; int64_t startValue = 0; int64_t limitValue = 3; int64_t deltaValue = 1; @@ -479,8 +387,6 @@ HWTEST_F(RangeBuilderTest, range_getprimitive_001, TestSize.Level1) LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); 
EXPECT_NE(expectPrimitive, primitive); - auto returnDTypeValue = mindspore::lite::MindIR_Range_GetDType(primitive.get()); - EXPECT_EQ(returnDTypeValue, dTypeValue); auto returnStartValue = mindspore::lite::MindIR_Range_GetStart(primitive.get()); EXPECT_EQ(returnStartValue, startValue); auto returnLimitValue = mindspore::lite::MindIR_Range_GetLimit(primitive.get()); diff --git a/test/unittest/ops/rank_test.cpp b/test/unittest/ops/rank_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..36f98631f072687a42b18b8d13b0f3e5be0734ef --- /dev/null +++ b/test/unittest/ops/rank_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/rank_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class RankBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + RankBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void RankBuilderTest::SetUp() {} + +void RankBuilderTest::TearDown() {} + +/** + * @tc.name: rank_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(RankBuilderTest, rank_build_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: rank_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(RankBuilderTest, rank_build_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: rank_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(RankBuilderTest, rank_build_003, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: rank_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
+ * @tc.type: FUNC + */ +HWTEST_F(RankBuilderTest, rank_build_004, TestSize.Level1) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: rank_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(RankBuilderTest, rank_build_005, TestSize.Level1) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: rank_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(RankBuilderTest, rank_build_006, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: rank_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(RankBuilderTest, rank_getprimitive_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: rank_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(RankBuilderTest, rank_getprimitive_002, TestSize.Level1) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/reduce_all_builder_test.cpp b/test/unittest/ops/reduce_all_builder_test.cpp index 768b645ba9476376cc8a1ffdfd9e4f961ba11db4..ea8cc33ddc9b0d086afe5242562043da082f0f04 100644 --- a/test/unittest/ops/reduce_all_builder_test.cpp +++ b/test/unittest/ops/reduce_all_builder_test.cpp @@ -30,14 +30,18 @@ public: void TearDown() override; protected: - void SaveParamsTensor(OH_NN_DataType dataType, + void SetKeepDims(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetCoeff(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetReduceToEnd(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); protected: ReduceAllBuilder m_builder; std::vector m_inputs {0, 1}; std::vector m_outputs {2}; - std::vector m_params {3}; + std::vector m_params {3, 4, 5}; std::vector m_inputDim {1, 1, 2, 2}; std::vector m_outputDim {1, 1, 1, 2}; std::vector m_paramDim {1}; @@ -47,7 +51,7 @@ void ReduceAllBuilderTest::SetUp() {} void ReduceAllBuilderTest::TearDown() {} -void ReduceAllBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, +void ReduceAllBuilderTest::SetKeepDims(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) { std::shared_ptr keepDimsTensor = TransToNNTensor(dataType, dim, quantParam, type); @@ -57,6 +61,26 @@ void ReduceAllBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, m_allTensors.emplace_back(keepDimsTensor); } +void ReduceAllBuilderTest::SetCoeff(OH_NN_DataType dataType, + const std::vector 
&dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr coeffTensor = TransToNNTensor(dataType, dim, quantParam, type); + float *coeffValue = new (std::nothrow) float(0.0f); + EXPECT_NE(nullptr, coeffValue); + coeffTensor->SetBuffer(coeffValue, sizeof(float)); + m_allTensors.emplace_back(coeffTensor); +} + +void ReduceAllBuilderTest::SetReduceToEnd(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr reduceToEndTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool *reduceToEndValue = new (std::nothrow) bool(true); + EXPECT_NE(nullptr, reduceToEndValue); + reduceToEndTensor->SetBuffer(reduceToEndValue, sizeof(bool)); + m_allTensors.emplace_back(reduceToEndTensor); +} + /** * @tc.name: reduceall_build_001 * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function @@ -66,7 +90,9 @@ HWTEST_F(ReduceAllBuilderTest, reduceall_build_001, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_ALL_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_SUCCESS, ret); @@ -81,7 +107,9 @@ HWTEST_F(ReduceAllBuilderTest, reduceall_build_002, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, 
nullptr, OH_NN_REDUCE_ALL_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_REDUCE_TO_END); EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); @@ -97,11 +125,13 @@ HWTEST_F(ReduceAllBuilderTest, reduceall_build_003, TestSize.Level0) { m_inputs = {0, 1, 2}; m_outputs = {3}; - m_params = {4}; + m_params = {4, 5, 6}; SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_ALL_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -115,11 +145,13 @@ HWTEST_F(ReduceAllBuilderTest, reduceall_build_003, TestSize.Level0) HWTEST_F(ReduceAllBuilderTest, reduceall_build_004, TestSize.Level0) { m_outputs = {2, 3}; - m_params = {4}; + m_params = {4, 5, 6}; SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_ALL_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -164,6 +196,8 @@ HWTEST_F(ReduceAllBuilderTest, reduceall_build_007, TestSize.Level0) int64_t keepDimsValue = 1; 
keepDimsTensor->SetBuffer(&keepDimsValue, sizeof(keepDimsValue)); m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_ALL_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -172,10 +206,56 @@ HWTEST_F(ReduceAllBuilderTest, reduceall_build_007, TestSize.Level0) /** * @tc.name: reduceall_build_008 - * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dimension + * @tc.desc: Verify that the build function return a failed message with invalided coeff's dataType * @tc.type: FUNC */ HWTEST_F(ReduceAllBuilderTest, reduceall_build_008, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + std::shared_ptr coeffTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_ALL_COEFF); + int64_t coeffValue = 1; + coeffTensor->SetBuffer(&coeffValue, sizeof(int64_t)); + m_allTensors.emplace_back(coeffTensor); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceall_build_009 + * @tc.desc: Verify that the build function return a failed message with invalided reduceToEnd's dataType + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + 
SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_ALL_COEFF); + std::shared_ptr reduceToEndTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_ALL_REDUCE_TO_END); + int64_t reduceToEndValue = 1; + reduceToEndTensor->SetBuffer(&reduceToEndValue, sizeof(reduceToEndValue)); + m_allTensors.emplace_back(reduceToEndTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceall_build_010 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_010, TestSize.Level0) { m_paramDim = {1, 2}; @@ -187,6 +267,8 @@ HWTEST_F(ReduceAllBuilderTest, reduceall_build_008, TestSize.Level0) bool keepDimsValue[2] = {true, true}; keepDimsTensor->SetBuffer(keepDimsValue, 2 * sizeof(bool)); m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_ALL_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -194,26 +276,112 @@ HWTEST_F(ReduceAllBuilderTest, reduceall_build_008, TestSize.Level0) } /** - * @tc.name: reduceall_build_009 - * @tc.desc: Verify that the build function return a failed message with invalided parameter + * @tc.name: reduceall_build_011 + * @tc.desc: Verify that the build function return a failed message with invalided coeff's dimension * @tc.type: FUNC */ -HWTEST_F(ReduceAllBuilderTest, reduceall_build_009, TestSize.Level0) +HWTEST_F(ReduceAllBuilderTest, reduceall_build_011, TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); 
+ + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + std::shared_ptr coeffTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_REDUCE_ALL_COEFF); + float coeffValue[2] = {1.0f, 1.0f}; + coeffTensor->SetBuffer(coeffValue, 2 * sizeof(float)); + m_allTensors.emplace_back(coeffTensor); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceall_build_012 + * @tc.desc: Verify that the build function return a failed message with invalided reduceToEnd's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_012, TestSize.Level0) { + m_paramDim = {1, 2}; + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_ALL_COEFF); + std::shared_ptr reduceToEndTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, + nullptr, OH_NN_REDUCE_ALL_REDUCE_TO_END); + bool reduceToEndValue[2] = {true, true}; + reduceToEndTensor->SetBuffer(reduceToEndValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(reduceToEndTensor); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); } /** - * @tc.name: reduceall_build_010 + * @tc.name: reduceall_build_013 + * @tc.desc: Verify that the build function return a failed message with invalided keepDims parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_013, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, 
m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_ALL_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceall_build_014 + * @tc.desc: Verify that the build function return a failed message with invalided coeff parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_014, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceall_build_015 + * @tc.desc: Verify that the build function return a failed message with invalided reduceToEnd parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_015, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_ALL_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceall_build_016 * 
@tc.desc: Verify that the build function return a failed message with empty keepdims's buffer * @tc.type: FUNC */ -HWTEST_F(ReduceAllBuilderTest, reduceall_build_010, TestSize.Level0) +HWTEST_F(ReduceAllBuilderTest, reduceall_build_016, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -221,12 +389,56 @@ HWTEST_F(ReduceAllBuilderTest, reduceall_build_010, TestSize.Level0) std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_ALL_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); keepDimsTensor->SetBuffer(nullptr, 0); } +/** + * @tc.name: reduceall_build_017 + * @tc.desc: Verify that the build function return a failed message with empty coeff's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_017, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + std::shared_ptr coeffTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_ALL_COEFF); + m_allTensors.emplace_back(coeffTensor); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceall_build_018 + * @tc.desc: Verify that the build function return a failed message with empty reduceToEnd's buffer + * @tc.type: FUNC + */ 
+HWTEST_F(ReduceAllBuilderTest, reduceall_build_018, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_ALL_COEFF); + std::shared_ptr reduceToEndTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, OH_NN_REDUCE_ALL_REDUCE_TO_END); + m_allTensors.emplace_back(reduceToEndTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); +} + /** * @tc.name: reduceall_get_primitive_001 * @tc.desc: Verify the GetPrimitive function return nullptr @@ -248,15 +460,24 @@ HWTEST_F(ReduceAllBuilderTest, reduceall_get_primitive_002, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_ALL_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_REDUCE_TO_END); bool keepDimsValue = true; + float coeffValue = 0.0f; + bool reduceToEndValue = true; EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); LiteGraphTensorPtr reduceallPrimitive = m_builder.GetPrimitive(); LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(reduceallPrimitive, expectPrimitive); - auto returnValue = mindspore::lite::MindIR_ReduceFusion_GetKeepDims(reduceallPrimitive.get()); - EXPECT_EQ(returnValue, keepDimsValue); + auto returnKeepDimsValue = mindspore::lite::MindIR_ReduceFusion_GetKeepDims(reduceallPrimitive.get()); + 
EXPECT_EQ(returnKeepDimsValue, keepDimsValue); + auto returnCoeffValue = mindspore::lite::MindIR_ReduceFusion_GetCoeff(reduceallPrimitive.get()); + EXPECT_EQ(returnCoeffValue, coeffValue); + auto returnReduceToEndValue = mindspore::lite::MindIR_ReduceFusion_GetReduceToEnd(reduceallPrimitive.get()); + EXPECT_EQ(returnReduceToEndValue, reduceToEndValue); } } // namespace UnitTest } // namespace NeuralNetworkRuntime diff --git a/test/unittest/ops/reduce_max_builder_test.cpp b/test/unittest/ops/reduce_max_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4b4969c29ecfd319b28f86633c375e8e899e04c5 --- /dev/null +++ b/test/unittest/ops/reduce_max_builder_test.cpp @@ -0,0 +1,484 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/reducemax_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ReduceMaxBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SetKeepDims(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetCoeff(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetReduceToEnd(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + ReduceMaxBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3, 4, 5}; + std::vector m_inputDim {1, 1, 2, 2}; + std::vector m_outputDim {1, 1, 1, 2}; + std::vector m_paramDim {1}; +}; + +void ReduceMaxBuilderTest::SetUp() {} + +void ReduceMaxBuilderTest::TearDown() {} + +void ReduceMaxBuilderTest::SetKeepDims(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr keepDimsTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool *keepDimsValue = new (std::nothrow) bool(true); + EXPECT_NE(nullptr, keepDimsValue); + keepDimsTensor->SetBuffer(keepDimsValue, sizeof(bool)); + m_allTensors.emplace_back(keepDimsTensor); +} + +void ReduceMaxBuilderTest::SetCoeff(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr coeffTensor = TransToNNTensor(dataType, dim, quantParam, type); + float *coeffValue = new (std::nothrow) float(0.0f); + EXPECT_NE(nullptr, coeffValue); + coeffTensor->SetBuffer(coeffValue, sizeof(float)); + m_allTensors.emplace_back(coeffTensor); +} + +void 
ReduceMaxBuilderTest::SetReduceToEnd(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr reduceToEndTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool *reduceToEndValue = new (std::nothrow) bool(true); + EXPECT_NE(nullptr, reduceToEndValue); + reduceToEndTensor->SetBuffer(reduceToEndValue, sizeof(bool)); + m_allTensors.emplace_back(reduceToEndTensor); +} + +/** + * @tc.name: reducemax_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MAX_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: reducemax_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MAX_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_REDUCE_TO_END); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + 
EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: reducemax_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + m_params = {4, 5, 6}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MAX_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemax_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + m_params = {4, 5, 6}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MAX_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemax_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + 
EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemax_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemax_build_007 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dataType + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_MAX_KEEP_DIMS); + int64_t keepDimsValue = 1; + keepDimsTensor->SetBuffer(&keepDimsValue, sizeof(keepDimsValue)); + m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MAX_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemax_build_008 + * @tc.desc: Verify that the build function return a failed message with invalided coeff's dataType + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_008, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_KEEP_DIMS); + std::shared_ptr coeffTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_MAX_COEFF); + 
int64_t coeffValue = 1; + coeffTensor->SetBuffer(&coeffValue, sizeof(int64_t)); + m_allTensors.emplace_back(coeffTensor); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemax_build_009 + * @tc.desc: Verify that the build function return a failed message with invalided reduceToEnd's dataType + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MAX_COEFF); + std::shared_ptr reduceToEndTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_MAX_REDUCE_TO_END); + int64_t reduceToEndValue = 1; + reduceToEndTensor->SetBuffer(&reduceToEndValue, sizeof(reduceToEndValue)); + m_allTensors.emplace_back(reduceToEndTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemax_build_010 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_010, TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, + nullptr, OH_NN_REDUCE_MAX_KEEP_DIMS); + bool keepDimsValue[2] = {true, true}; + keepDimsTensor->SetBuffer(keepDimsValue, 2 * sizeof(bool)); + 
m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MAX_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemax_build_011 + * @tc.desc: Verify that the build function return a failed message with invalided coeff's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_011, TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_KEEP_DIMS); + std::shared_ptr coeffTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_REDUCE_MAX_COEFF); + float coeffValue[2] = {1.0f, 1.0f}; + coeffTensor->SetBuffer(coeffValue, 2 * sizeof(float)); + m_allTensors.emplace_back(coeffTensor); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemax_build_012 + * @tc.desc: Verify that the build function return a failed message with invalided reduceToEnd's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_012, TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MAX_COEFF); + std::shared_ptr reduceToEndTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, + 
nullptr, OH_NN_REDUCE_MAX_REDUCE_TO_END); + bool reduceToEndValue[2] = {true, true}; + reduceToEndTensor->SetBuffer(reduceToEndValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(reduceToEndTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemax_build_013 + * @tc.desc: Verify that the build function return a failed message with invalided keepDims parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_013, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MAX_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemax_build_014 + * @tc.desc: Verify that the build function return a failed message with invalided coeff parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_014, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemax_build_015 + * @tc.desc: Verify that the build function return a failed message with 
invalided reduceToEnd parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_015, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MAX_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemax_build_016 + * @tc.desc: Verify that the build function return a failed message with empty keepdims's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_016, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, OH_NN_REDUCE_MAX_KEEP_DIMS); + m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MAX_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemax_build_017 + * @tc.desc: Verify that the build function return a failed message with empty coeff's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_017, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_KEEP_DIMS); + std::shared_ptr coeffTensor = TransToNNTensor(OH_NN_INT64, + 
m_paramDim, nullptr, OH_NN_REDUCE_MAX_COEFF); + m_allTensors.emplace_back(coeffTensor); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemax_build_018 + * @tc.desc: Verify that the build function return a failed message with empty reduceToEnd's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_build_018, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MAX_COEFF); + std::shared_ptr reduceToEndTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, OH_NN_REDUCE_MAX_REDUCE_TO_END); + m_allTensors.emplace_back(reduceToEndTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemax_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: reducemax_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxBuilderTest, reducemax_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + 
SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MAX_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MAX_REDUCE_TO_END); + + bool keepDimsValue = true; + float coeffValue = 0.0f; + bool reduceToEndValue = true; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr reducemaxPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + + EXPECT_NE(reducemaxPrimitive, expectPrimitive); + auto returnKeepDimsValue = mindspore::lite::MindIR_ReduceFusion_GetKeepDims(reducemaxPrimitive.get()); + EXPECT_EQ(returnKeepDimsValue, keepDimsValue); + auto returnCoeffValue = mindspore::lite::MindIR_ReduceFusion_GetCoeff(reducemaxPrimitive.get()); + EXPECT_EQ(returnCoeffValue, coeffValue); + auto returnReduceToEndValue = mindspore::lite::MindIR_ReduceFusion_GetReduceToEnd(reducemaxPrimitive.get()); + EXPECT_EQ(returnReduceToEndValue, reduceToEndValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/reduce_mean_builder_test.cpp b/test/unittest/ops/reduce_mean_builder_test.cpp index 4909912531a91261d15f93910de36d3a5da08420..55a44fc8db217028e2063a536e3bdd388e244221 100644 --- a/test/unittest/ops/reduce_mean_builder_test.cpp +++ b/test/unittest/ops/reduce_mean_builder_test.cpp @@ -30,14 +30,18 @@ public: void TearDown() override; protected: - void SaveParamsTensor(OH_NN_DataType dataType, + void SetKeepDims(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetCoeff(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetReduceToEnd(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType 
type); protected: ReduceMeanBuilder m_builder; std::vector m_inputs {0, 1}; std::vector m_outputs {2}; - std::vector m_params {3}; + std::vector m_params {3, 4, 5}; std::vector m_inputDim {3, 5, 6, 4}; std::vector m_outputDim {3, 5, 6, 1}; std::vector m_paramDim {1}; @@ -47,7 +51,7 @@ void ReduceMeanBuilderTest::SetUp() {} void ReduceMeanBuilderTest::TearDown() {} -void ReduceMeanBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, +void ReduceMeanBuilderTest::SetKeepDims(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) { std::shared_ptr keepDimsTensor = TransToNNTensor(dataType, dim, quantParam, type); @@ -57,6 +61,26 @@ void ReduceMeanBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, m_allTensors.emplace_back(keepDimsTensor); } +void ReduceMeanBuilderTest::SetCoeff(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr coeffTensor = TransToNNTensor(dataType, dim, quantParam, type); + float *coeffValue = new (std::nothrow) float(0.0f); + EXPECT_NE(nullptr, coeffValue); + coeffTensor->SetBuffer(coeffValue, sizeof(float)); + m_allTensors.emplace_back(coeffTensor); +} + +void ReduceMeanBuilderTest::SetReduceToEnd(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr reduceToEndTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool *reduceToEndValue = new (std::nothrow) bool(true); + EXPECT_NE(nullptr, reduceToEndValue); + reduceToEndTensor->SetBuffer(reduceToEndValue, sizeof(bool)); + m_allTensors.emplace_back(reduceToEndTensor); +} + /** * @tc.name: reducemean_build_001 * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function @@ -66,7 +90,9 @@ HWTEST_F(ReduceMeanBuilderTest, reducemean_build_001, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); 
SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_SUCCESS, ret); @@ -81,7 +107,9 @@ HWTEST_F(ReduceMeanBuilderTest, reducemean_build_002, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_REDUCE_TO_END); EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); @@ -97,11 +125,13 @@ HWTEST_F(ReduceMeanBuilderTest, reducemean_build_003, TestSize.Level0) { m_inputs = {0, 1, 2}; m_outputs = {3}; - m_params = {4}; + m_params = {4, 5, 6}; SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -115,11 +145,13 
@@ HWTEST_F(ReduceMeanBuilderTest, reducemean_build_003, TestSize.Level0) HWTEST_F(ReduceMeanBuilderTest, reducemean_build_004, TestSize.Level0) { m_outputs = {2, 3}; - m_params = {4}; + m_params = {4, 5, 6}; SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -164,6 +196,8 @@ HWTEST_F(ReduceMeanBuilderTest, reducemean_build_007, TestSize.Level0) int64_t keepDimsValue = 1; keepDimsTensor->SetBuffer(&keepDimsValue, sizeof(keepDimsValue)); m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -172,10 +206,56 @@ HWTEST_F(ReduceMeanBuilderTest, reducemean_build_007, TestSize.Level0) /** * @tc.name: reducemean_build_008 - * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dimension + * @tc.desc: Verify that the build function return a failed message with invalided coeff's dataType * @tc.type: FUNC */ HWTEST_F(ReduceMeanBuilderTest, reducemean_build_008, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + std::shared_ptr coeffTensor = 
TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_MEAN_COEFF); + int64_t coeffValue = 1; + coeffTensor->SetBuffer(&coeffValue, sizeof(int64_t)); + m_allTensors.emplace_back(coeffTensor); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemean_build_009 + * @tc.desc: Verify that the build function return a failed message with invalided reduceToEnd's dataType + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_COEFF); + std::shared_ptr reduceToEndTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_MEAN_REDUCE_TO_END); + int64_t reduceToEndValue = 1; + reduceToEndTensor->SetBuffer(&reduceToEndValue, sizeof(reduceToEndValue)); + m_allTensors.emplace_back(reduceToEndTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemean_build_010 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_010, TestSize.Level0) { m_paramDim = {1, 2}; @@ -187,6 +267,8 @@ HWTEST_F(ReduceMeanBuilderTest, reducemean_build_008, TestSize.Level0) bool keepDimsValue[2] = {true, true}; keepDimsTensor->SetBuffer(keepDimsValue, 2 * sizeof(bool)); m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, 
m_paramDim, nullptr, OH_NN_REDUCE_MEAN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -194,26 +276,112 @@ HWTEST_F(ReduceMeanBuilderTest, reducemean_build_008, TestSize.Level0) } /** - * @tc.name: reducemean_build_009 + * @tc.name: reducemean_build_011 + * @tc.desc: Verify that the build function return a failed message with invalided coeff's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_011, TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + std::shared_ptr coeffTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_REDUCE_MEAN_COEFF); + float coeffValue[2] = {1.0f, 1.0f}; + coeffTensor->SetBuffer(coeffValue, 2 * sizeof(float)); + m_allTensors.emplace_back(coeffTensor); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemean_build_012 + * @tc.desc: Verify that the build function return a failed message with invalided reduceToEnd's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_012, TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_COEFF); + std::shared_ptr reduceToEndTensor = 
TransToNNTensor(OH_NN_BOOL, m_paramDim, + nullptr, OH_NN_REDUCE_MEAN_REDUCE_TO_END); + bool reduceToEndValue[2] = {true, true}; + reduceToEndTensor->SetBuffer(reduceToEndValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(reduceToEndTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemean_build_013 * @tc.desc: Verify that the build function return a failed message with invalided parameter * @tc.type: FUNC */ -HWTEST_F(ReduceMeanBuilderTest, reducemean_build_009, TestSize.Level0) +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_013, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_ALL_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } /** - * @tc.name: reducemean_build_010 + * @tc.name: reducemean_build_014 + * @tc.desc: Verify that the build function return a failed message with invalided coeff parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_014, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_REDUCE_TO_END); + + OH_NN_ReturnCode ret = 
m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemean_build_015 + * @tc.desc: Verify that the build function return a failed message with invalided reduceToEnd parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_015, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemean_build_016 * @tc.desc: Verify that the build function return a failed message with empty keepdims's buffer * @tc.type: FUNC */ -HWTEST_F(ReduceMeanBuilderTest, reducemean_build_010, TestSize.Level0) +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_016, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -221,12 +389,56 @@ HWTEST_F(ReduceMeanBuilderTest, reducemean_build_010, TestSize.Level0) std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); keepDimsTensor->SetBuffer(nullptr, 0); } +/** + * @tc.name: reducemean_build_017 + * @tc.desc: Verify that the build function return a failed message with empty 
coeff's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_017, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + std::shared_ptr coeffTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_MEAN_COEFF); + m_allTensors.emplace_back(coeffTensor); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemean_build_018 + * @tc.desc: Verify that the build function return a failed message with empty reduceToEnd's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_018, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_COEFF); + std::shared_ptr reduceToEndTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, OH_NN_REDUCE_MEAN_REDUCE_TO_END); + m_allTensors.emplace_back(reduceToEndTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); +} + /** * @tc.name: reducemean_get_primitive_001 * @tc.desc: Verify the GetPrimitive function return nullptr @@ -248,15 +460,24 @@ HWTEST_F(ReduceMeanBuilderTest, reducemean_get_primitive_002, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, m_paramDim, 
nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_REDUCE_TO_END); bool keepDimsValue = true; + float coeffValue = 0.0f; + bool reduceToEndValue = true; EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); LiteGraphTensorPtr reducemeanPrimitive = m_builder.GetPrimitive(); LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(reducemeanPrimitive, expectPrimitive); - auto returnValue = mindspore::lite::MindIR_ReduceFusion_GetKeepDims(reducemeanPrimitive.get()); - EXPECT_EQ(returnValue, keepDimsValue); + auto returnKeepDimsValue = mindspore::lite::MindIR_ReduceFusion_GetKeepDims(reducemeanPrimitive.get()); + EXPECT_EQ(returnKeepDimsValue, keepDimsValue); + auto returnCoeffValue = mindspore::lite::MindIR_ReduceFusion_GetCoeff(reducemeanPrimitive.get()); + EXPECT_EQ(returnCoeffValue, coeffValue); + auto returnReduceToEndValue = mindspore::lite::MindIR_ReduceFusion_GetReduceToEnd(reducemeanPrimitive.get()); + EXPECT_EQ(returnReduceToEndValue, reduceToEndValue); } } // namespace UnitTest } // namespace NeuralNetworkRuntime diff --git a/test/unittest/ops/reduce_min_builder_test.cpp b/test/unittest/ops/reduce_min_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5c230a63adb47cd0bbbc30649cc8c442a5b69ee5 --- /dev/null +++ b/test/unittest/ops/reduce_min_builder_test.cpp @@ -0,0 +1,484 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/reducemin_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ReduceMinBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SetKeepDims(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetCoeff(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetReduceToEnd(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + ReduceMinBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3, 4, 5}; + std::vector m_inputDim {1, 1, 2, 2}; + std::vector m_outputDim {1, 1, 1, 2}; + std::vector m_paramDim {1}; +}; + +void ReduceMinBuilderTest::SetUp() {} + +void ReduceMinBuilderTest::TearDown() {} + +void ReduceMinBuilderTest::SetKeepDims(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr keepDimsTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool *keepDimsValue = new (std::nothrow) bool(true); + EXPECT_NE(nullptr, keepDimsValue); + keepDimsTensor->SetBuffer(keepDimsValue, sizeof(bool)); + m_allTensors.emplace_back(keepDimsTensor); +} + +void 
ReduceMinBuilderTest::SetCoeff(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr coeffTensor = TransToNNTensor(dataType, dim, quantParam, type); + float *coeffValue = new (std::nothrow) float(0.0f); + EXPECT_NE(nullptr, coeffValue); + coeffTensor->SetBuffer(coeffValue, sizeof(float)); + m_allTensors.emplace_back(coeffTensor); +} + +void ReduceMinBuilderTest::SetReduceToEnd(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr reduceToEndTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool *reduceToEndValue = new (std::nothrow) bool(true); + EXPECT_NE(nullptr, reduceToEndValue); + reduceToEndTensor->SetBuffer(reduceToEndValue, sizeof(bool)); + m_allTensors.emplace_back(reduceToEndTensor); +} + +/** + * @tc.name: reducemin_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MIN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: reducemin_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, 
m_paramDim, nullptr, OH_NN_REDUCE_MIN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MIN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_REDUCE_TO_END); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: reducemin_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + m_params = {4, 5, 6}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MIN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemin_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + m_params = {4, 5, 6}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MIN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, 
m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemin_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemin_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemin_build_007 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dataType + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_MIN_KEEP_DIMS); + int64_t keepDimsValue = 1; + keepDimsTensor->SetBuffer(&keepDimsValue, sizeof(keepDimsValue)); + m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MIN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemin_build_008 + * @tc.desc: Verify that the build function return a failed message with invalided coeff's dataType + * @tc.type: FUNC + */ 
+HWTEST_F(ReduceMinBuilderTest, reducemin_build_008, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_KEEP_DIMS); + std::shared_ptr coeffTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_MIN_COEFF); + int64_t coeffValue = 1; + coeffTensor->SetBuffer(&coeffValue, sizeof(int64_t)); + m_allTensors.emplace_back(coeffTensor); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemin_build_009 + * @tc.desc: Verify that the build function return a failed message with invalided reduceToEnd's dataType + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MIN_COEFF); + std::shared_ptr reduceToEndTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_MIN_REDUCE_TO_END); + int64_t reduceToEndValue = 1; + reduceToEndTensor->SetBuffer(&reduceToEndValue, sizeof(reduceToEndValue)); + m_allTensors.emplace_back(reduceToEndTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemin_build_010 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_build_010, 
TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, + nullptr, OH_NN_REDUCE_MIN_KEEP_DIMS); + bool keepDimsValue[2] = {true, true}; + keepDimsTensor->SetBuffer(keepDimsValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MIN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemin_build_011 + * @tc.desc: Verify that the build function return a failed message with invalided coeff's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_build_011, TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_KEEP_DIMS); + std::shared_ptr coeffTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_REDUCE_MIN_COEFF); + float coeffValue[2] = {1.0f, 1.0f}; + coeffTensor->SetBuffer(coeffValue, 2 * sizeof(float)); + m_allTensors.emplace_back(coeffTensor); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemin_build_012 + * @tc.desc: Verify that the build function return a failed message with invalided reduceToEnd's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_build_012, 
TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MIN_COEFF); + std::shared_ptr reduceToEndTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, + nullptr, OH_NN_REDUCE_MIN_REDUCE_TO_END); + bool reduceToEndValue[2] = {true, true}; + reduceToEndTensor->SetBuffer(reduceToEndValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(reduceToEndTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemin_build_013 + * @tc.desc: Verify that the build function return a failed message with invalided keepDims parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_build_013, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MIN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemin_build_014 + * @tc.desc: Verify that the build function return a failed message with invalided coeff parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_build_014, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_KEEP_DIMS); + 
SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemin_build_015 + * @tc.desc: Verify that the build function return a failed message with invalided reduceToEnd parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_build_015, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MIN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemin_build_016 + * @tc.desc: Verify that the build function return a failed message with empty keepdims's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_build_016, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, OH_NN_REDUCE_MIN_KEEP_DIMS); + m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MIN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemin_build_017 + * @tc.desc: Verify that the build 
function return a failed message with empty coeff's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_build_017, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_KEEP_DIMS); + std::shared_ptr coeffTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_MIN_COEFF); + m_allTensors.emplace_back(coeffTensor); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemin_build_018 + * @tc.desc: Verify that the build function return a failed message with empty reduceToEnd's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_build_018, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MIN_COEFF); + std::shared_ptr reduceToEndTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, OH_NN_REDUCE_MIN_REDUCE_TO_END); + m_allTensors.emplace_back(reduceToEndTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemin_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, 
DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: reducemin_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinBuilderTest, reducemin_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_MIN_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MIN_REDUCE_TO_END); + + bool keepDimsValue = true; + float coeffValue = 0.0f; + bool reduceToEndValue = true; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr reduceminPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + + EXPECT_NE(reduceminPrimitive, expectPrimitive); + auto returnKeepDimsValue = mindspore::lite::MindIR_ReduceFusion_GetKeepDims(reduceminPrimitive.get()); + EXPECT_EQ(returnKeepDimsValue, keepDimsValue); + auto returnCoeffValue = mindspore::lite::MindIR_ReduceFusion_GetCoeff(reduceminPrimitive.get()); + EXPECT_EQ(returnCoeffValue, coeffValue); + auto returnReduceToEndValue = mindspore::lite::MindIR_ReduceFusion_GetReduceToEnd(reduceminPrimitive.get()); + EXPECT_EQ(returnReduceToEndValue, reduceToEndValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/reduce_prod_builder_test.cpp b/test/unittest/ops/reduce_prod_builder_test.cpp index 4ab78f6da238b806da7daced5019aa19c99ec60d..29a0285736338f34fc9dc01629248667c2b843ce 100644 --- a/test/unittest/ops/reduce_prod_builder_test.cpp +++ b/test/unittest/ops/reduce_prod_builder_test.cpp @@ -30,14 +30,18 @@ public: void TearDown() override; 
protected: - void SaveParamsTensor(OH_NN_DataType dataType, + void SetKeepDims(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetCoeff(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetReduceToEnd(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); protected: ReduceProdBuilder m_builder; std::vector m_inputs {0, 1}; std::vector m_outputs {2}; - std::vector m_params {3}; + std::vector m_params {3, 4, 5}; std::vector m_inputDim {3, 5, 6, 4}; std::vector m_outputDim {3, 5, 6, 1}; std::vector m_paramDim {1}; @@ -47,7 +51,7 @@ void ReduceProdBuilderTest::SetUp() {} void ReduceProdBuilderTest::TearDown() {} -void ReduceProdBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, +void ReduceProdBuilderTest::SetKeepDims(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) { std::shared_ptr keepDimsTensor = TransToNNTensor(dataType, dim, quantParam, type); @@ -57,6 +61,26 @@ void ReduceProdBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, m_allTensors.emplace_back(keepDimsTensor); } +void ReduceProdBuilderTest::SetCoeff(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr coeffTensor = TransToNNTensor(dataType, dim, quantParam, type); + float *coeffValue = new (std::nothrow) float(0.0f); + EXPECT_NE(nullptr, coeffValue); + coeffTensor->SetBuffer(coeffValue, sizeof(float)); + m_allTensors.emplace_back(coeffTensor); +} + +void ReduceProdBuilderTest::SetReduceToEnd(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr reduceToEndTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool *reduceToEndValue = new (std::nothrow) bool(true); + EXPECT_NE(nullptr, 
reduceToEndValue); + reduceToEndTensor->SetBuffer(reduceToEndValue, sizeof(bool)); + m_allTensors.emplace_back(reduceToEndTensor); +} + /** * @tc.name: reduceprod_build_001 * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function @@ -66,7 +90,9 @@ HWTEST_F(ReduceProdBuilderTest, reduceprod_build_001, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_PROD_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_SUCCESS, ret); @@ -81,7 +107,9 @@ HWTEST_F(ReduceProdBuilderTest, reduceprod_build_002, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_PROD_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_REDUCE_TO_END); EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); @@ -97,11 +125,13 @@ HWTEST_F(ReduceProdBuilderTest, reduceprod_build_003, TestSize.Level0) { m_inputs = {0, 1, 2}; m_outputs = {3}; - m_params = {4}; + m_params = {4, 5, 6}; SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, 
m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_PROD_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -115,11 +145,13 @@ HWTEST_F(ReduceProdBuilderTest, reduceprod_build_003, TestSize.Level0) HWTEST_F(ReduceProdBuilderTest, reduceprod_build_004, TestSize.Level0) { m_outputs = {2, 3}; - m_params = {4}; + m_params = {4, 5, 6}; SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_PROD_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -164,6 +196,8 @@ HWTEST_F(ReduceProdBuilderTest, reduceprod_build_007, TestSize.Level0) int64_t keepDimsValue = 1; keepDimsTensor->SetBuffer(&keepDimsValue, sizeof(keepDimsValue)); m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_PROD_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -172,10 +206,56 @@ HWTEST_F(ReduceProdBuilderTest, reduceprod_build_007, TestSize.Level0) /** * @tc.name: reduceprod_build_008 - * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dimension + * @tc.desc: Verify 
that the build function return a failed message with invalided coeff's dataType * @tc.type: FUNC */ HWTEST_F(ReduceProdBuilderTest, reduceprod_build_008, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + std::shared_ptr coeffTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_PROD_COEFF); + int64_t coeffValue = 1; + coeffTensor->SetBuffer(&coeffValue, sizeof(int64_t)); + m_allTensors.emplace_back(coeffTensor); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceprod_build_009 + * @tc.desc: Verify that the build function return a failed message with invalided reduceToEnd's dataType + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_PROD_COEFF); + std::shared_ptr reduceToEndTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_PROD_REDUCE_TO_END); + int64_t reduceToEndValue = 1; + reduceToEndTensor->SetBuffer(&reduceToEndValue, sizeof(reduceToEndValue)); + m_allTensors.emplace_back(reduceToEndTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceprod_build_010 + * @tc.desc: Verify that the build function return a failed message with 
invalided keepdims's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_010, TestSize.Level0) { m_paramDim = {1, 2}; @@ -183,10 +263,12 @@ HWTEST_F(ReduceProdBuilderTest, reduceprod_build_008, TestSize.Level0) SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, - m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); bool keepDimsValue[2] = {true, true}; keepDimsTensor->SetBuffer(keepDimsValue, 2 * sizeof(bool)); m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_PROD_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -194,26 +276,112 @@ HWTEST_F(ReduceProdBuilderTest, reduceprod_build_008, TestSize.Level0) } /** - * @tc.name: reduceprod_build_009 + * @tc.name: reduceprod_build_011 + * @tc.desc: Verify that the build function return a failed message with invalided coeff's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_011, TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + std::shared_ptr coeffTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_REDUCE_PROD_COEFF); + float coeffValue[2] = {1.0f, 1.0f}; + coeffTensor->SetBuffer(coeffValue, 2 * sizeof(float)); + m_allTensors.emplace_back(coeffTensor); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + 
coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceprod_build_012 + * @tc.desc: Verify that the build function return a failed message with invalided reduceToEnd's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_012, TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_PROD_COEFF); + std::shared_ptr reduceToEndTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, + nullptr, OH_NN_REDUCE_PROD_REDUCE_TO_END); + bool reduceToEndValue[2] = {true, true}; + reduceToEndTensor->SetBuffer(reduceToEndValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(reduceToEndTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceprod_build_013 * @tc.desc: Verify that the build function return a failed message with invalided parameter * @tc.type: FUNC */ -HWTEST_F(ReduceProdBuilderTest, reduceprod_build_009, TestSize.Level0) +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_013, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_PROD_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } /** - * @tc.name: reduceprod_build_010 + * @tc.name: 
reduceprod_build_014 + * @tc.desc: Verify that the build function return a failed message with invalided coeff parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_014, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceprod_build_015 + * @tc.desc: Verify that the build function return a failed message with invalided reduceToEnd parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_015, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_PROD_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceprod_build_016 * @tc.desc: Verify that the build function return a failed message with empty keepdims's buffer * @tc.type: FUNC */ -HWTEST_F(ReduceProdBuilderTest, reduceprod_build_010, TestSize.Level0) +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_016, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); @@ -221,12 +389,56 @@ HWTEST_F(ReduceProdBuilderTest, reduceprod_build_010, 
TestSize.Level0) std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_PROD_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_REDUCE_TO_END); OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); keepDimsTensor->SetBuffer(nullptr, 0); } +/** + * @tc.name: reduceprod_build_017 + * @tc.desc: Verify that the build function return a failed message with empty coeff's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_017, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + std::shared_ptr coeffTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_PROD_COEFF); + m_allTensors.emplace_back(coeffTensor); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceprod_build_018 + * @tc.desc: Verify that the build function return a failed message with empty reduceToEnd's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_018, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_PROD_COEFF); + std::shared_ptr reduceToEndTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, 
OH_NN_REDUCE_PROD_REDUCE_TO_END); + m_allTensors.emplace_back(reduceToEndTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); +} + /** * @tc.name: reduceprod_get_primitive_001 * @tc.desc: Verify the GetPrimitive function return nullptr @@ -248,15 +460,24 @@ HWTEST_F(ReduceProdBuilderTest, reduceprod_get_primitive_002, TestSize.Level0) { SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); - SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_PROD_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_REDUCE_TO_END); bool keepDimsValue = true; + float coeffValue = 0.0f; + bool reduceToEndValue = true; EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); LiteGraphTensorPtr reduceprodPrimitive = m_builder.GetPrimitive(); LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(reduceprodPrimitive, expectPrimitive); - auto returnValue = mindspore::lite::MindIR_ReduceFusion_GetKeepDims(reduceprodPrimitive.get()); - EXPECT_EQ(returnValue, keepDimsValue); + auto returnKeepDimsValue = mindspore::lite::MindIR_ReduceFusion_GetKeepDims(reduceprodPrimitive.get()); + EXPECT_EQ(returnKeepDimsValue, keepDimsValue); + auto returnCoeffValue = mindspore::lite::MindIR_ReduceFusion_GetCoeff(reduceprodPrimitive.get()); + EXPECT_EQ(returnCoeffValue, coeffValue); + auto returnReduceToEndValue = mindspore::lite::MindIR_ReduceFusion_GetReduceToEnd(reduceprodPrimitive.get()); + EXPECT_EQ(returnReduceToEndValue, reduceToEndValue); } } // namespace UnitTest } // namespace NeuralNetworkRuntime diff --git 
a/test/unittest/ops/reduce_sum_builder_test.cpp b/test/unittest/ops/reduce_sum_builder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a118cc4fc4b439950d530c9c417a6f806af4dec4 --- /dev/null +++ b/test/unittest/ops/reduce_sum_builder_test.cpp @@ -0,0 +1,484 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/reducesum_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ReduceSumBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SetKeepDims(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetCoeff(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetReduceToEnd(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + ReduceSumBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3, 4, 5}; + std::vector m_inputDim {1, 1, 2, 2}; + std::vector m_outputDim {1, 1, 1, 2}; + std::vector m_paramDim {1}; +}; + +void ReduceSumBuilderTest::SetUp() {} + +void 
ReduceSumBuilderTest::TearDown() {} + +void ReduceSumBuilderTest::SetKeepDims(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr keepDimsTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool *keepDimsValue = new (std::nothrow) bool(true); + EXPECT_NE(nullptr, keepDimsValue); + keepDimsTensor->SetBuffer(keepDimsValue, sizeof(bool)); + m_allTensors.emplace_back(keepDimsTensor); +} + +void ReduceSumBuilderTest::SetCoeff(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr coeffTensor = TransToNNTensor(dataType, dim, quantParam, type); + float *coeffValue = new (std::nothrow) float(0.0f); + EXPECT_NE(nullptr, coeffValue); + coeffTensor->SetBuffer(coeffValue, sizeof(float)); + m_allTensors.emplace_back(coeffTensor); +} + +void ReduceSumBuilderTest::SetReduceToEnd(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr reduceToEndTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool *reduceToEndValue = new (std::nothrow) bool(true); + EXPECT_NE(nullptr, reduceToEndValue); + reduceToEndTensor->SetBuffer(reduceToEndValue, sizeof(bool)); + m_allTensors.emplace_back(reduceToEndTensor); +} + +/** + * @tc.name: reducesum_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_SUM_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_REDUCE_TO_END); + + OH_NN_ReturnCode ret = 
m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: reducesum_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_SUM_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_REDUCE_TO_END); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: reducesum_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + m_params = {4, 5, 6}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_SUM_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducesum_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_004, TestSize.Level0) +{ + 
m_outputs = {2, 3}; + m_params = {4, 5, 6}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_SUM_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducesum_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducesum_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducesum_build_007 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dataType + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_SUM_KEEP_DIMS); + int64_t keepDimsValue = 1; + keepDimsTensor->SetBuffer(&keepDimsValue, sizeof(keepDimsValue)); + m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, 
m_paramDim, nullptr, OH_NN_REDUCE_SUM_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducesum_build_008 + * @tc.desc: Verify that the build function return a failed message with invalided coeff's dataType + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_008, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_KEEP_DIMS); + std::shared_ptr coeffTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_SUM_COEFF); + int64_t coeffValue = 1; + coeffTensor->SetBuffer(&coeffValue, sizeof(int64_t)); + m_allTensors.emplace_back(coeffTensor); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducesum_build_009 + * @tc.desc: Verify that the build function return a failed message with invalided reduceToEnd's dataType + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_SUM_COEFF); + std::shared_ptr reduceToEndTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_SUM_REDUCE_TO_END); + int64_t reduceToEndValue = 1; + reduceToEndTensor->SetBuffer(&reduceToEndValue, 
sizeof(reduceToEndValue)); + m_allTensors.emplace_back(reduceToEndTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducesum_build_010 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_010, TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, + nullptr, OH_NN_REDUCE_SUM_KEEP_DIMS); + bool keepDimsValue[2] = {true, true}; + keepDimsTensor->SetBuffer(keepDimsValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_SUM_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducesum_build_011 + * @tc.desc: Verify that the build function return a failed message with invalided coeff's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_011, TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_KEEP_DIMS); + std::shared_ptr coeffTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_REDUCE_SUM_COEFF); + float coeffValue[2] = {1.0f, 1.0f}; + coeffTensor->SetBuffer(coeffValue, 2 * sizeof(float)); + m_allTensors.emplace_back(coeffTensor); + 
SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducesum_build_012 + * @tc.desc: Verify that the build function return a failed message with invalided reduceToEnd's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_012, TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_SUM_COEFF); + std::shared_ptr reduceToEndTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, + nullptr, OH_NN_REDUCE_SUM_REDUCE_TO_END); + bool reduceToEndValue[2] = {true, true}; + reduceToEndTensor->SetBuffer(reduceToEndValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(reduceToEndTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducesum_build_013 + * @tc.desc: Verify that the build function return a failed message with invalided keepDims parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_013, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_SUM_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + 
EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducesum_build_014 + * @tc.desc: Verify that the build function return a failed message with invalided coeff parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_014, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducesum_build_015 + * @tc.desc: Verify that the build function return a failed message with invalided reduceToEnd parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_015, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_SUM_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducesum_build_016 + * @tc.desc: Verify that the build function return a failed message with empty keepdims's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_016, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, 
OH_NN_REDUCE_SUM_KEEP_DIMS); + m_allTensors.emplace_back(keepDimsTensor); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_SUM_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducesum_build_017 + * @tc.desc: Verify that the build function return a failed message with empty coeff's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_017, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_KEEP_DIMS); + std::shared_ptr coeffTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_SUM_COEFF); + m_allTensors.emplace_back(coeffTensor); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_REDUCE_TO_END); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + coeffTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducesum_build_018 + * @tc.desc: Verify that the build function return a failed message with empty reduceToEnd's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_build_018, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_SUM_COEFF); + std::shared_ptr reduceToEndTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, OH_NN_REDUCE_SUM_REDUCE_TO_END); + m_allTensors.emplace_back(reduceToEndTensor); + + OH_NN_ReturnCode ret = 
m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + reduceToEndTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducesum_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: reducesum_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumBuilderTest, reducesum_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SetKeepDims(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_KEEP_DIMS); + SetCoeff(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_REDUCE_SUM_COEFF); + SetReduceToEnd(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_SUM_REDUCE_TO_END); + + bool keepDimsValue = true; + float coeffValue = 0.0f; + bool reduceToEndValue = true; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr reducesumPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + + EXPECT_NE(reducesumPrimitive, expectPrimitive); + auto returnKeepDimsValue = mindspore::lite::MindIR_ReduceFusion_GetKeepDims(reducesumPrimitive.get()); + EXPECT_EQ(returnKeepDimsValue, keepDimsValue); + auto returnCoeffValue = mindspore::lite::MindIR_ReduceFusion_GetCoeff(reducesumPrimitive.get()); + EXPECT_EQ(returnCoeffValue, coeffValue); + auto returnReduceToEndValue = mindspore::lite::MindIR_ReduceFusion_GetReduceToEnd(reducesumPrimitive.get()); + EXPECT_EQ(returnReduceToEndValue, 
reduceToEndValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/round_test.cpp b/test/unittest/ops/round_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e882541a4274b5b3b3b88ed4d199ae3cf419d23b --- /dev/null +++ b/test/unittest/ops/round_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/round_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class RoundBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + RoundBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void RoundBuilderTest::SetUp() {} + +void RoundBuilderTest::TearDown() {} + +/** + * @tc.name: round_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(RoundBuilderTest, round_build_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: round_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(RoundBuilderTest, round_build_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: round_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(RoundBuilderTest, round_build_003, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: round_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
+ * @tc.type: FUNC + */ +HWTEST_F(RoundBuilderTest, round_build_004, TestSize.Level1) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: round_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(RoundBuilderTest, round_build_005, TestSize.Level1) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: round_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(RoundBuilderTest, round_build_006, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: round_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(RoundBuilderTest, round_getprimitive_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: round_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(RoundBuilderTest, round_getprimitive_002, TestSize.Level1) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/scatter_nd_test.cpp b/test/unittest/ops/scatter_nd_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..22f2d4d6cb014827d197abad471025ca1652f0f0 --- /dev/null +++ b/test/unittest/ops/scatter_nd_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/scatter_nd_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ScatterNDBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + ScatterNDBuilder m_builder; + std::vector m_inputs {0, 1, 2}; + std::vector m_outputs {3}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void ScatterNDBuilderTest::SetUp() {} + +void ScatterNDBuilderTest::TearDown() {} + +/** + * @tc.name: scatterNd_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(ScatterNDBuilderTest, scatterNd_build_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: scatterNd_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(ScatterNDBuilderTest, scatterNd_build_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: scatterNd_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(ScatterNDBuilderTest, scatterNd_build_003, TestSize.Level1) +{ + m_inputs = {0, 1, 2, 3}; + m_outputs = {4}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: scatterNd_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
+ * @tc.type: FUNC + */ +HWTEST_F(ScatterNDBuilderTest, scatterNd_build_004, TestSize.Level1) +{ + m_outputs = {3, 4}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: scatterNd_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(ScatterNDBuilderTest, scatterNd_build_005, TestSize.Level1) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: scatterNd_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(ScatterNDBuilderTest, scatterNd_build_006, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: scatterNd_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(ScatterNDBuilderTest, scatterNd_getprimitive_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: scatterNd_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(ScatterNDBuilderTest, scatterNd_getprimitive_002, TestSize.Level1) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/slice_builder_test.cpp b/test/unittest/ops/slice_builder_test.cpp index 1cdbed99e661654f81ef4335b6192f6b79f71bf3..dd1b81296a2996daff929e1baf0df200c060c667 100644 --- a/test/unittest/ops/slice_builder_test.cpp +++ b/test/unittest/ops/slice_builder_test.cpp @@ -35,16 +35,18 @@ protected: protected: SliceBuilder m_builder; + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3 }; + std::vector paramsIndex = { 4 }; + std::vector paramsDim = {}; }; void SliceBuilderTest::InitTensor(const std::vector& inputsIndex, - const std::vector& outputsIndex) + const std::vector& outputsIndex) { - std::vector paramsIndex = {}; std::vector inputDim = {3, 2, 3}; std::vector OutputDim = {1, 1, 3}; - m_paramsIndex = paramsIndex; SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); } @@ -66,10 +68,8 @@ void SliceBuilderTest::SaveAxesTensor(OH_NN_DataType dataType, const std::vector */ HWTEST_F(SliceBuilderTest, slice_build_001, TestSize.Level0) { - std::vector inputsIndex = { 0, 1, 2 }; - std::vector outputsIndex = { 3 }; - InitTensor(inputsIndex, outputsIndex); + SaveAxesTensor(OH_NN_INT64, paramsDim, nullptr, OH_NN_SLICE_AXES); OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_SUCCESS, ret); @@ -82,10 +82,8 @@ HWTEST_F(SliceBuilderTest, slice_build_001, TestSize.Level0) */ HWTEST_F(SliceBuilderTest, slice_build_002, TestSize.Level0) { - std::vector inputsIndex = { 0, 1, 2 }; - std::vector outputsIndex = { 3 }; - InitTensor(inputsIndex, outputsIndex); + 
SaveAxesTensor(OH_NN_INT64, paramsDim, nullptr, OH_NN_SLICE_AXES); EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); @@ -99,10 +97,12 @@ HWTEST_F(SliceBuilderTest, slice_build_002, TestSize.Level0) */ HWTEST_F(SliceBuilderTest, slice_build_003, TestSize.Level0) { - std::vector inputsIndex = { 0, 1, 2, 3 }; - std::vector outputsIndex = { 4 }; + inputsIndex = { 0, 1, 2, 3 }; + outputsIndex = { 4 }; + paramsIndex = { 5 }; InitTensor(inputsIndex, outputsIndex); + SaveAxesTensor(OH_NN_INT64, paramsDim, nullptr, OH_NN_SLICE_AXES); OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -115,10 +115,12 @@ HWTEST_F(SliceBuilderTest, slice_build_003, TestSize.Level0) */ HWTEST_F(SliceBuilderTest, slice_build_004, TestSize.Level0) { - std::vector inputsIndex = { 0, 1, 2 }; - std::vector outputsIndex = { 3, 4 }; + inputsIndex = { 0, 1, 2 }; + outputsIndex = { 3, 4 }; + paramsIndex = { 5 }; InitTensor(inputsIndex, outputsIndex); + SaveAxesTensor(OH_NN_INT64, paramsDim, nullptr, OH_NN_SLICE_AXES); OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -131,10 +133,12 @@ HWTEST_F(SliceBuilderTest, slice_build_004, TestSize.Level0) */ HWTEST_F(SliceBuilderTest, slice_build_005, TestSize.Level0) { - std::vector inputsIndex = {}; - std::vector outputsIndex = {}; + inputsIndex = {}; + outputsIndex = {}; + paramsIndex = {}; InitTensor(inputsIndex, outputsIndex); + SaveAxesTensor(OH_NN_INT64, paramsDim, nullptr, OH_NN_SLICE_AXES); OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -147,10 +151,12 @@ HWTEST_F(SliceBuilderTest, slice_build_005, TestSize.Level0) */ 
HWTEST_F(SliceBuilderTest, slice_build_006, TestSize.Level0) { - std::vector inputsIndex = { 0, 1, 2 }; - std::vector outputsIndex = {}; + inputsIndex = { 0, 1, 2 }; + outputsIndex = {}; + paramsIndex = {}; InitTensor(inputsIndex, outputsIndex); + SaveAxesTensor(OH_NN_INT64, paramsDim, nullptr, OH_NN_SLICE_AXES); OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -158,22 +164,70 @@ HWTEST_F(SliceBuilderTest, slice_build_006, TestSize.Level0) /** * @tc.name: slice_build_007 - * @tc.desc: Provide a param to verify the abnormal behavior of the Build function + * @tc.desc: Provide a valid datatype param to verify the abnormal behavior of the Build function * @tc.type: FUNC */ HWTEST_F(SliceBuilderTest, slice_build_007, TestSize.Level0) { - std::vector inputsIndex = { 0, 1, 2 }; - std::vector outputsIndex = {3}; - std::vector paramsIndex = { 4 }; - std::vector inputDim = {3, 2, 3}; - std::vector OutputDim = {1, 1, 3}; - std::vector paramDim = {}; + std::vector inputDim = { 3, 2, 3 }; + std::vector OutputDim = { 1, 1, 3 }; + std::vector paramsDim = {}; m_paramsIndex = paramsIndex; SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); - SaveAxesTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + + std::shared_ptr axesTensor = TransToNNTensor(OH_NN_FLOAT32, paramsDim, + nullptr, OH_NN_SLICE_AXES); + float* axesValue = new (std::nothrow) float[1] {0.0f}; + EXPECT_NE(nullptr, axesValue); + axesTensor->SetBuffer(axesValue, sizeof(float)); + m_allTensors.emplace_back(axesTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: slice_build_008 + * @tc.desc: Provide a valid type param to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ 
+HWTEST_F(SliceBuilderTest, slice_build_008, TestSize.Level0) +{ + std::vector inputDim = { 3, 2, 3 }; + std::vector OutputDim = { 1, 1, 3 }; + std::vector paramsDim = {}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + SaveAxesTensor(OH_NN_INT64, paramsDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: slice_build_009 + * @tc.desc: Provide a param without set buffer to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_build_009, TestSize.Level0) +{ + std::vector inputDim = { 3, 2, 3 }; + std::vector OutputDim = { 1, 1, 3 }; + std::vector paramsDim = {}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + + std::shared_ptr axesTensor = TransToNNTensor(OH_NN_INT64, paramsDim, + nullptr, OH_NN_SLICE_AXES); + m_allTensors.emplace_back(axesTensor); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -198,16 +252,20 @@ HWTEST_F(SliceBuilderTest, slice_getprimitive_001, TestSize.Level0) */ HWTEST_F(SliceBuilderTest, slice_getprimitive_002, TestSize.Level0) { - std::vector inputsIndex = { 0, 1, 2 }; - std::vector outputsIndex = { 3 }; - InitTensor(inputsIndex, outputsIndex); + SaveAxesTensor(OH_NN_INT64, paramsDim, nullptr, OH_NN_SLICE_AXES); std::vector expectAxesValue = {0}; EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); EXPECT_NE(primitive, 
expectPrimitive); + + auto returnAxes = mindspore::lite::MindIR_SliceFusion_GetAxes(primitive.get()); + auto returnAxesSize = returnAxes.size(); + for (size_t i = 0; i < returnAxesSize; ++i) { + EXPECT_EQ(returnAxes[i], expectAxesValue[i]); + } } } // namespace UnitTest } // namespace NeuralNetworkRuntime diff --git a/test/unittest/ops/space_to_depth_test.cpp b/test/unittest/ops/space_to_depth_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a528c361aa358b2c41811a11d3f7e2fde2560081 --- /dev/null +++ b/test/unittest/ops/space_to_depth_test.cpp @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ops/space_to_depth_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SpaceToDepthBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveBlockSize(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + SpaceToDepthBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_params {2}; + std::vector m_inputDim {1, 3, 2, 2}; + std::vector m_outputDim {1, 12, 1, 1}; + std::vector m_paramDim {}; +}; + +void SpaceToDepthBuilderTest::SetUp() {} + +void SpaceToDepthBuilderTest::TearDown() {} + +void SpaceToDepthBuilderTest::SaveBlockSize(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr blockSizeTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* blockSizeValue = new (std::nothrow) int64_t [1] {2}; + EXPECT_NE(nullptr, blockSizeValue); + blockSizeTensor->SetBuffer(blockSizeValue, sizeof(int64_t)); + m_allTensors.emplace_back(blockSizeTensor); +} + +/** + * @tc.name: space_to_depth_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthBuilderTest, space_to_depth_build_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: space_to_depth_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthBuilderTest, space_to_depth_build_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: space_to_depth_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthBuilderTest, space_to_depth_build_003, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + m_params = {3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: space_to_depth_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
+ * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthBuilderTest, space_to_depth_build_004, TestSize.Level1) +{ + m_outputs = {1, 2}; + m_params = {3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: space_to_depth_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthBuilderTest, space_to_depth_build_005, TestSize.Level1) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: space_to_depth_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthBuilderTest, space_to_depth_build_006, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: space_to_depth_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid blockSize's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthBuilderTest, space_to_depth_build_007, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + std::shared_ptr blockSizeTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE); + float* blockSizeValue = new (std::nothrow) float [1]{2.0f}; + EXPECT_NE(nullptr, blockSizeValue); + blockSizeTensor->SetBuffer(blockSizeValue, sizeof(float)); + m_allTensors.emplace_back(blockSizeTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + blockSizeTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: space_to_depth_build_008 + * @tc.desc: Verify that the build function returns a failed message with passing invalid blockSize param. + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthBuilderTest, space_to_depth_build_008, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: space_to_depth_build_009 + * @tc.desc: Verify that the build function returns a failed message without set buffer for blockSize. 
+ * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthBuilderTest, space_to_depth_build_009, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + std::shared_ptr blockSizeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE); + m_allTensors.emplace_back(blockSizeTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: space_to_depth_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthBuilderTest, space_to_depth_getprimitive_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE); + + int64_t blockSizeValue = 2; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnBlockSizeValue = mindspore::lite::MindIR_SpaceToDepth_GetBlockSize(primitive.get()); + EXPECT_EQ(returnBlockSizeValue, blockSizeValue); +} + +/** + * @tc.name: space_to_depth_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthBuilderTest, space_to_depth_getprimitive_002, TestSize.Level1) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/swish_test.cpp b/test/unittest/ops/swish_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e84ac91143d814b10eafeb09a968d37286aa8d28 --- /dev/null +++ b/test/unittest/ops/swish_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ops/swish_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SwishBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + SwishBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void SwishBuilderTest::SetUp() {} + +void SwishBuilderTest::TearDown() {} + +/** + * @tc.name: swish_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(SwishBuilderTest, swish_build_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: swish_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(SwishBuilderTest, swish_build_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: swish_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(SwishBuilderTest, swish_build_003, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: swish_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
+ * @tc.type: FUNC + */ +HWTEST_F(SwishBuilderTest, swish_build_004, TestSize.Level1) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: swish_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(SwishBuilderTest, swish_build_005, TestSize.Level1) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: swish_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(SwishBuilderTest, swish_build_006, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: swish_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(SwishBuilderTest, swish_getprimitive_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: swish_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(SwishBuilderTest, swish_getprimitive_002, TestSize.Level1) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/tile_builder_test.cpp b/test/unittest/ops/tile_builder_test.cpp index e180dd92df318259a19df7c876f78eb190dec0b9..3e2bda373ae051b41dfe7bfc0652b45ae66772b8 100644 --- a/test/unittest/ops/tile_builder_test.cpp +++ b/test/unittest/ops/tile_builder_test.cpp @@ -30,28 +30,36 @@ class TileBuilderTest : public OpsTest { protected: void InitTensor(const std::vector& inputsIndex, const std::vector& outputsIndex) override; - void CheckResult(); + void SaveDimsTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); protected: TileBuilder m_builder; + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramsIndex = { 3 }; + std::vector paramDim = {}; }; void TileBuilderTest::InitTensor(const std::vector& inputsIndex, - const std::vector& outputsIndex) + const std::vector& outputsIndex) { std::vector inputDim = {2, 2}; std::vector OutputDim = {4, 4}; + m_paramsIndex = paramsIndex; SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); } -void TileBuilderTest::CheckResult() +void TileBuilderTest::SaveDimsTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) { - EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); - LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); - LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; - EXPECT_NE(primitive, expectPrimitive); + std::shared_ptr dimsTensor = TransToNNTensor(dataType, dim, quantParam, 
type); + int64_t* dimsValue = new (std::nothrow) int64_t[1] {0}; + EXPECT_NE(nullptr, dimsValue); + dimsTensor->SetBuffer(dimsValue, sizeof(int64_t)); + m_allTensors.emplace_back(dimsTensor); } /** @@ -61,10 +69,8 @@ void TileBuilderTest::CheckResult() */ HWTEST_F(TileBuilderTest, tile_build_001, TestSize.Level0) { - std::vector inputsIndex = { 0, 1 }; - std::vector outputsIndex = { 2 }; - InitTensor(inputsIndex, outputsIndex); + SaveDimsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_TILE_DIMS); OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_SUCCESS, ret); @@ -77,10 +83,8 @@ HWTEST_F(TileBuilderTest, tile_build_001, TestSize.Level0) */ HWTEST_F(TileBuilderTest, tile_build_002, TestSize.Level0) { - std::vector inputsIndex = { 0, 1 }; - std::vector outputsIndex = { 2 }; - InitTensor(inputsIndex, outputsIndex); + SaveDimsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_TILE_DIMS); EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); @@ -94,10 +98,12 @@ HWTEST_F(TileBuilderTest, tile_build_002, TestSize.Level0) */ HWTEST_F(TileBuilderTest, tile_build_003, TestSize.Level0) { - std::vector inputsIndex = { 0, 1, 2 }; - std::vector outputsIndex = { 3 }; + inputsIndex = { 0, 1, 2 }; + outputsIndex = { 3 }; + paramsIndex = { 4 }; InitTensor(inputsIndex, outputsIndex); + SaveDimsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_TILE_DIMS); OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -110,10 +116,11 @@ HWTEST_F(TileBuilderTest, tile_build_003, TestSize.Level0) */ HWTEST_F(TileBuilderTest, tile_build_004, TestSize.Level0) { - std::vector inputsIndex = { 0, 1 }; - std::vector outputsIndex = { 2, 3 }; + outputsIndex = { 2, 3 }; + paramsIndex = { 4 }; InitTensor(inputsIndex, 
outputsIndex); + SaveDimsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_TILE_DIMS); OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -126,23 +133,17 @@ HWTEST_F(TileBuilderTest, tile_build_004, TestSize.Level0) */ HWTEST_F(TileBuilderTest, tile_build_005, TestSize.Level0) { - std::vector inputsIndex = { 0, 1 }; - std::vector outputsIndex = { 2 }; - std::vector paramsIndex = {}; - OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } /** - * @tc.name: tile_build_001 + * @tc.name: tile_build_006 * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function * @tc.type: FUNC */ HWTEST_F(TileBuilderTest, tile_build_006, TestSize.Level0) { - std::vector inputsIndex = { 0, 1 }; - std::vector outputsIndex = { 2 }; std::vector inputDim = {2, 2}; SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); @@ -153,17 +154,66 @@ HWTEST_F(TileBuilderTest, tile_build_006, TestSize.Level0) /** * @tc.name: tile_build_007 - * @tc.desc: Provide a param to verify the abnormal behavior of the Build function + * @tc.desc: Provide a valid datatype param to verify the abnormal behavior of the Build function * @tc.type: FUNC */ HWTEST_F(TileBuilderTest, tile_build_007, TestSize.Level0) { - std::vector inputsIndex = { 0, 1 }; - std::vector outputsIndex = { 2 }; - std::vector paramsIndex = { 4 }; + std::vector inputDim = {2, 2}; + std::vector OutputDim = {4, 4}; m_paramsIndex = paramsIndex; - InitTensor(inputsIndex, outputsIndex); + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + + std::shared_ptr dimsTensor = TransToNNTensor(OH_NN_FLOAT32, paramDim, + nullptr, OH_NN_TILE_DIMS); + float* dimsValue = new (std::nothrow) float[1] {0.0f}; + EXPECT_NE(nullptr, dimsValue); + dimsTensor->SetBuffer(dimsValue, 
sizeof(float)); + m_allTensors.emplace_back(dimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + dimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: tile_build_008 + * @tc.desc: Provide a valid type param to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TileBuilderTest, tile_build_008, TestSize.Level0) +{ + std::vector inputDim = {2, 2}; + std::vector OutputDim = {4, 4}; + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + SaveDimsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: tile_build_009 + * @tc.desc: Provide a param without set buffer to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TileBuilderTest, tile_build_009, TestSize.Level0) +{ + std::vector inputDim = {2, 2}; + std::vector OutputDim = {4, 4}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + + std::shared_ptr dimsTensor = TransToNNTensor(OH_NN_INT64, paramDim, + nullptr, OH_NN_TILE_DIMS); + m_allTensors.emplace_back(dimsTensor); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -188,12 +238,20 @@ HWTEST_F(TileBuilderTest, tile_get_primitive_001, TestSize.Level0) */ HWTEST_F(TileBuilderTest, tile_getprimitive_002, TestSize.Level0) { - std::vector inputsIndex = { 0, 1 }; - std::vector outputsIndex = { 2 }; - InitTensor(inputsIndex, outputsIndex); + SaveDimsTensor(OH_NN_INT64, paramDim, nullptr, 
OH_NN_TILE_DIMS); + + std::vector expectDimsValue = {0}; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_NE(primitive, expectPrimitive); - CheckResult(); + auto returnDims = mindspore::lite::MindIR_TileFusion_GetDims(primitive.get()); + auto returnDimsSize = returnDims.size(); + for (size_t i = 0; i < returnDimsSize; ++i) { + EXPECT_EQ(returnDims[i], expectDimsValue[i]); + } } } // namespace UnitTest } // namespace NeuralNetworkRuntime diff --git a/test/unittest/ops/topk_builder_test.cpp b/test/unittest/ops/topk_builder_test.cpp index 2f16de2d4f21f2fceb315aa8674880e9eb6eae9b..b8f9bf1d2943cf1ccf21a26e884443e5ac833326 100644 --- a/test/unittest/ops/topk_builder_test.cpp +++ b/test/unittest/ops/topk_builder_test.cpp @@ -32,6 +32,8 @@ protected: const std::vector& outputsIndex) override; void SaveSortedTensor(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); protected: TopKBuilder m_builder; @@ -49,11 +51,21 @@ void TopKBuilderTest::InitTensor(const std::vector& inputsIndex, const SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); } +void TopKBuilderTest::SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr axisTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t[1] {0}; + EXPECT_NE(nullptr, axisValue); + axisTensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); +} + void TopKBuilderTest::SaveSortedTensor(OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* 
quantParam, OH_NN_TensorType type) { std::shared_ptr topkTensor = TransToNNTensor(dataType, dim, quantParam, type); - bool* topkValue = new (std::nothrow) bool[1]{true}; + bool* topkValue = new (std::nothrow) bool[1] {true}; EXPECT_NE(nullptr, topkValue); topkTensor->SetBuffer(topkValue, sizeof(bool)); m_allTensors.emplace_back(topkTensor); @@ -69,12 +81,14 @@ HWTEST_F(TopKBuilderTest, topk_build_001, TestSize.Level0) { std::vector inputsIndex = { 0, 1 }; std::vector outputsIndex = { 2, 3 }; + std::vector paramsIndex = { 4, 5 }; std::vector paramDim = {}; InitTensor(inputsIndex, outputsIndex); SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_TOP_K_AXIS); - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_SUCCESS, ret); } @@ -87,13 +101,15 @@ HWTEST_F(TopKBuilderTest, topk_build_002, TestSize.Level0) { std::vector inputsIndex = { 0, 1 }; std::vector outputsIndex = { 2, 3 }; + std::vector paramsIndex = { 4, 5 }; std::vector paramDim = {}; InitTensor(inputsIndex, outputsIndex); SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_TOP_K_AXIS); - EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); } @@ -106,12 +122,14 @@ HWTEST_F(TopKBuilderTest, topk_build_003, TestSize.Level0) { std::vector inputsIndex = { 0, 1, 2, 3 }; std::vector outputsIndex = { 4, 5 }; + std::vector 
paramsIndex = { 6, 7 }; std::vector paramDim = {}; InitTensor(inputsIndex, outputsIndex); SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_TOP_K_AXIS); - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } @@ -124,12 +142,14 @@ HWTEST_F(TopKBuilderTest, topk_builder_004, TestSize.Level0) { std::vector inputsIndex = { 0, 1 }; std::vector outputsIndex = { 2, 3, 4 }; + std::vector paramsIndex = { 5, 6 }; std::vector paramDim = {}; InitTensor(inputsIndex, outputsIndex); SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_TOP_K_AXIS); - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } @@ -142,7 +162,7 @@ HWTEST_F(TopKBuilderTest, topk_build_005, TestSize.Level0) { std::vector inputsIndex = { 0, 1, 2 }; std::vector outputsIndex = { 3, 4 }; - std::vector paramsIndex = { 5 }; + std::vector paramsIndex = { 5, 6, 7 }; OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); @@ -157,12 +177,14 @@ HWTEST_F(TopKBuilderTest, topk_build_006, TestSize.Level0) { std::vector inputsIndex = { 0, 1 }; std::vector outputsIndex = { 2, 3 }; + std::vector paramsIndex = { 4, 5 }; std::vector paramDim = {}; InitTensor(inputsIndex, outputsIndex); SaveSortedTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_TOP_K_SORTED); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_TOP_K_AXIS); - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + 
OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } @@ -175,12 +197,14 @@ HWTEST_F(TopKBuilderTest, topk_build_007, TestSize.Level0) { std::vector inputsIndex = { 0, 1 }; std::vector outputsIndex = { 2, 3 }; + std::vector paramsIndex = { 4, 5 }; std::vector paramDim = {}; InitTensor(inputsIndex, outputsIndex); SaveSortedTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TOP_K_SORTED); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_TOP_K_AXIS); - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } @@ -193,6 +217,7 @@ HWTEST_F(TopKBuilderTest, topk_build_008, TestSize.Level0) { std::vector inputsIndex = { 0, 1 }; std::vector outputsIndex = { 2, 3 }; + std::vector paramsIndex = { 4, 5 }; std::vector paramDim = {}; InitTensor(inputsIndex, outputsIndex); @@ -200,26 +225,72 @@ HWTEST_F(TopKBuilderTest, topk_build_008, TestSize.Level0) std::shared_ptr topkTensor = TransToNNTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED); topkTensor->SetBuffer(nullptr, 0); m_allTensors.emplace_back(topkTensor); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_TOP_K_AXIS); - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } /** * @tc.name: topk_build_009 - * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function + * @tc.desc: Provide axis parameter buffer is nullptr to verify the abnormal behavior of the Build function * @tc.type: FUNC */ HWTEST_F(TopKBuilderTest, topk_build_009, TestSize.Level0) { std::vector inputsIndex = { 0, 1 }; std::vector outputsIndex = { 2, 3 }; 
+ std::vector paramsIndex = { 4, 5 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + + SaveSortedTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TOP_K_SORTED); + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_TOP_K_AXIS); + axisTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: topk_build_010 + * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TopKBuilderTest, topk_build_010, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3 }; + std::vector paramsIndex = { 4, 5 }; std::vector paramDim = {}; InitTensor(inputsIndex, outputsIndex); SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_SCALE_AXIS); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_TOP_K_AXIS); - OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: topk_build_011 + * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TopKBuilderTest, topk_build_011, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3 }; + std::vector paramsIndex = { 4, 5 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SCALE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); } @@ -244,10 
+315,14 @@ HWTEST_F(TopKBuilderTest, topk_get_primitive_002, TestSize.Level0) { std::vector inputsIndex = { 0, 1 }; std::vector outputsIndex = { 2, 3 }; + std::vector paramsIndex = { 4, 5 }; std::vector paramDim = {}; + InitTensor(inputsIndex, outputsIndex); SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_TOP_K_AXIS); + int64_t axisValue = 0; EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; @@ -255,6 +330,8 @@ HWTEST_F(TopKBuilderTest, topk_get_primitive_002, TestSize.Level0) auto sortedReturn = mindspore::lite::MindIR_TopKFusion_GetSorted(primitive.get()); EXPECT_EQ(sortedReturn, m_topkValue); + auto axisReturn = mindspore::lite::MindIR_TopKFusion_GetAxis(primitive.get()); + EXPECT_EQ(axisReturn, axisValue); } } // namespace UnitTest } // namespace NeuralNetworkRuntime