From 1340646fa25cecb554dc5f433772eda4af276416 Mon Sep 17 00:00:00 2001 From: wang-yangsong Date: Thu, 11 Apr 2024 15:27:48 +0800 Subject: [PATCH 1/6] optimize switch-case Signed-off-by: wang-yangsong --- .../lite_graph_to_hdi_model_v2_1.cpp | 378 +++++------------- 1 file changed, 99 insertions(+), 279 deletions(-) diff --git a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp index f94b159..8e24005 100644 --- a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp +++ b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include "common/log.h" #include "message_parcel.h" #include "nnrt/v2_1/nnrt_types.h" @@ -29,6 +30,7 @@ typedef void *TensorPtr; namespace OHOS { namespace NeuralNetworkRuntime { namespace NNRt_V2_1 { +//std::unordered_map(*)(PrimitivePtr)> convertOpMap = {{NODE_TYPE_ACTIVATION, &ConvertActivation}}; std::vector ConvertActivation(PrimitivePtr primitive) { if (primitive == nullptr) { @@ -1638,288 +1640,106 @@ std::vector ConvertLogSoftmax(PrimitivePtr primitive) return ret; } +std::unordered_map(*)(PrimitivePtr)> convertOpMap = { + {NODE_TYPE_ACTIVATION, &ConvertActivation}, + {NODE_TYPE_ADD_FUSION, &ConvertAddFusion}, + {NODE_TYPE_ALL, &ConvertAll}, + {NODE_TYPE_ARGMAX_FUSION, &ConvertArgMaxFusion}, + {NODE_TYPE_ASSERT, &ConvertAssert}, + {NODE_TYPE_AVGPOOL_FUSION, &ConvertAvgPoolFusion}, + {NODE_TYPE_BATCH_TO_SPACE_ND, &ConvertBatchToSpaceND}, + {NODE_TYPE_BIAS_ADD, &ConvertBiasAdd}, + {NODE_TYPE_BROADCAST_TO, &ConvertBroadcastTo}, + {NODE_TYPE_CAST, &ConvertCast}, + {NODE_TYPE_CEIL, &ConvertCeil}, + {NODE_TYPE_CLIP, &ConvertClip}, + {NODE_TYPE_CONCAT, &ConvertConcat}, + {NODE_TYPE_CONV2D_FUSION, &ConvertConv2DFusion}, + {NODE_TYPE_CONV2D_TRANSPOSE_FUSION, &ConvertConv2dTransposeFusion}, + {NODE_TYPE_COS, &ConvertCos}, + {NODE_TYPE_CONSTANT_OF_SHAPE, &ConvertConstantOfShape}, + {NODE_TYPE_CROP, &ConvertCrop}, + {NODE_TYPE_DEPTH_TO_SPACE, &ConvertDepthToSpace}, + {NODE_TYPE_DETECTION_POST_PROCESS, &ConvertDetectionPostProcess}, + {NODE_TYPE_DIV_FUSION, &ConvertDivFusion}, + {NODE_TYPE_ELTWISE, &ConvertEltwise}, + {NODE_TYPE_EQUAL, &ConvertEqual}, + {NODE_TYPE_EXPFUSION, &ConvertExpFusion}, + {NODE_TYPE_EXPAND_DIMS, &ConvertExpandDims}, + {NODE_TYPE_FLATTEN, &ConvertFlatten}, + {NODE_TYPE_FLOOR, &ConvertFloor}, + {NODE_TYPE_FILL, &ConvertFill}, + {NODE_TYPE_FULL_CONNECTION, &ConvertFullConnection}, + {NODE_TYPE_FUSED_BATCH_NORM, &ConvertFusedBatchNorm}, + {NODE_TYPE_GATHER, &ConvertGather}, + {NODE_TYPE_GATHER_ND, &ConvertGatherNd}, + {NODE_TYPE_GREATER, &ConvertGreater}, + {NODE_TYPE_GREATER_EQUAL, &ConvertGreaterEqual}, + {NODE_TYPE_INSTANCE_NORM, &ConvertInstanceNorm}, + {NODE_TYPE_LAYER_NORM_FUSION, &ConvertLayerNormFusion}, + {NODE_TYPE_LESS, &ConvertLess}, + {NODE_TYPE_LESS_EQUAL, &ConvertLessEqual}, + {NODE_TYPE_LOG, &ConvertLog}, + {NODE_TYPE_LOGICAL_AND, &ConvertLogicalAnd}, + {NODE_TYPE_LOGICAL_NOT, &ConvertLogicalNot}, + {NODE_TYPE_LOGICAL_OR, &ConvertLogicalOr}, + {NODE_TYPE_LRN, &ConvertLRN}, + {NODE_TYPE_LSTM, &ConvertLSTM}, + {NODE_TYPE_L2_NORMALIZE_FUSION, &ConvertL2NormalizeFusion}, + {NODE_TYPE_MATMUL_FUSION, &ConvertMatMulFusion}, + {NODE_TYPE_MAXIMUM, &ConvertMaximum}, + {NODE_TYPE_MAX_POOL_FUSION, &ConvertMaxPoolFusion}, + {NODE_TYPE_MINIMUM, &ConvertMinimum}, + {NODE_TYPE_MOD, &ConvertMod}, + {NODE_TYPE_MUL_FUSION, &ConvertMulFusion}, + {NODE_TYPE_NEG, 
&ConvertNeg}, + {NODE_TYPE_NOT_EQUAL, &ConvertNotEqual}, + {NODE_TYPE_ONE_HOT, &ConvertOneHot}, + {NODE_TYPE_PAD_FUSION, &ConvertPadFusion}, + {NODE_TYPE_POW_FUSION, &ConvertPowFusion}, + {NODE_TYPE_PRELU_FUSION, &ConvertPReLUFusion}, + {NODE_TYPE_QUANT_DTYPE_CAST, &ConvertQuantDTypeCast}, + {NODE_TYPE_RANK, &ConvertRank}, + {NODE_TYPE_RANGE, &ConvertRange}, + {NODE_TYPE_RECIPROCAL, &ConvertReciprocal}, + {NODE_TYPE_REDUCE_FUSION, &ConvertReduceFusion}, + {NODE_TYPE_RESHAPE, &ConvertReshape}, + {NODE_TYPE_RESIZE, &ConvertResize}, + {NODE_TYPE_ROUND, &ConvertRound}, + {NODE_TYPE_RSQRT, &ConvertRsqrt}, + {NODE_TYPE_SCALE_FUSION, &ConvertScaleFusion}, + {NODE_TYPE_SCATTER_ND, &ConvertScatterNd}, + {NODE_TYPE_SHAPE, &ConvertShape}, + {NODE_TYPE_SIN, &ConvertSin}, + {NODE_TYPE_SLICE_FUSION, &ConvertSliceFusion}, + {NODE_TYPE_SOFTMAX, &ConvertSoftmax}, + {NODE_TYPE_SPACE_TO_BATCH_ND, &ConvertSpaceToBatchND}, + {NODE_TYPE_SPACE_TO_DEPTH, &ConvertSpaceToDepth}, + {NODE_TYPE_SPARSE_TO_DENSE, &ConvertSparseToDense}, + {NODE_TYPE_SPLIT, &ConvertSplit}, + {NODE_TYPE_SQRT, &ConvertSqrt}, + {NODE_TYPE_SQUARED_DIFFERENCE, &ConvertSquaredDifference}, + {NODE_TYPE_SQUEEZE, &ConvertSqueeze}, + {NODE_TYPE_SQUARE, &ConvertSquare}, + {NODE_TYPE_STACK, &ConvertStack}, + {NODE_TYPE_STRIDED_SLICE, &ConvertStridedSlice}, + {NODE_TYPE_SUB_FUSION, &ConvertSubFusion}, + {NODE_TYPE_TILE_FUSION, &ConvertTileFusion}, + {NODE_TYPE_TOPK_FUSION, &ConvertTopKFusion}, + {NODE_TYPE_TRANSPOSE, &ConvertTranspose}, + {NODE_TYPE_UNSQUEEZE, &ConvertUnsqueeze}, + {NODE_TYPE_UNSTACK, &ConvertUnstack}, + {NODE_TYPE_WHERE, &ConvertWhere}, + {NODE_TYPE_SELECT, &ConvertSelect}, + {NODE_TYPE_ERF, &ConvertErf}, + {NODE_TYPE_LOG_SOFTMAX, &ConvertLogSoftmax}}; + std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr primitive) { - switch (type) { - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ACTIVATION: - return ConvertActivation(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ADD_FUSION: - return ConvertAddFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ALL: - return ConvertAll(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ARGMAX_FUSION: - return ConvertArgMaxFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ASSERT: - return ConvertAssert(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_AVGPOOL_FUSION: - return ConvertAvgPoolFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_BATCH_TO_SPACE_ND: - return ConvertBatchToSpaceND(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_BIAS_ADD: - return ConvertBiasAdd(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_BROADCAST_TO: - return ConvertBroadcastTo(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CAST: - return ConvertCast(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CEIL: - return ConvertCeil(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CLIP: - return ConvertClip(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CONCAT: - return ConvertConcat(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CONV2D_FUSION: - return ConvertConv2DFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CONV2D_TRANSPOSE_FUSION: - return ConvertConv2dTransposeFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_COS: - return ConvertCos(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CONSTANT_OF_SHAPE: - return ConvertConstantOfShape(primitive); - break; - case 
OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CROP: - return ConvertCrop(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_DEPTH_TO_SPACE: - return ConvertDepthToSpace(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_DETECTION_POST_PROCESS: - return ConvertDetectionPostProcess(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_DIV_FUSION: - return ConvertDivFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ELTWISE: - return ConvertEltwise(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_EQUAL: - return ConvertEqual(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_EXPFUSION: - return ConvertExpFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_EXPAND_DIMS: - return ConvertExpandDims(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_FLATTEN: - return ConvertFlatten(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_FLOOR: - return ConvertFloor(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_FILL: - return ConvertFill(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_FULL_CONNECTION: - return ConvertFullConnection(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_FUSED_BATCH_NORM: - return ConvertFusedBatchNorm(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_GATHER: - return ConvertGather(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_GATHER_ND: - return ConvertGatherNd(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_GREATER: - return ConvertGreater(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_GREATER_EQUAL: - return ConvertGreaterEqual(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_INSTANCE_NORM: - return ConvertInstanceNorm(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LAYER_NORM_FUSION: - return ConvertLayerNormFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LESS: - return ConvertLess(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LESS_EQUAL: - return ConvertLessEqual(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LOG: - return ConvertLog(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LOGICAL_AND: - return ConvertLogicalAnd(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LOGICAL_NOT: - return ConvertLogicalNot(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LOGICAL_OR: - return ConvertLogicalOr(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LRN: - return ConvertLRN(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LSTM: - return ConvertLSTM(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_L2_NORMALIZE_FUSION: - return ConvertL2NormalizeFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MATMUL_FUSION: - return ConvertMatMulFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MAXIMUM: - return ConvertMaximum(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MAX_POOL_FUSION: - return ConvertMaxPoolFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MINIMUM: - return ConvertMinimum(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MOD: - return ConvertMod(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MUL_FUSION: - return ConvertMulFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_NEG: - return ConvertNeg(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_NOT_EQUAL: - return ConvertNotEqual(primitive); - break; - case 
OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ONE_HOT: - return ConvertOneHot(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_PAD_FUSION: - return ConvertPadFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_POW_FUSION: - return ConvertPowFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_PRELU_FUSION: - return ConvertPReLUFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_QUANT_DTYPE_CAST: - return ConvertQuantDTypeCast(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RANK: - return ConvertRank(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RANGE: - return ConvertRange(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RECIPROCAL: - return ConvertReciprocal(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_REDUCE_FUSION: - return ConvertReduceFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RESHAPE: - return ConvertReshape(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RESIZE: - return ConvertResize(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ROUND: - return ConvertRound(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RSQRT: - return ConvertRsqrt(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SCALE_FUSION: - return ConvertScaleFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SCATTER_ND: - return ConvertScatterNd(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SHAPE: - return ConvertShape(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SIN: - return ConvertSin(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SLICE_FUSION: - return ConvertSliceFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SOFTMAX: - return ConvertSoftmax(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SPACE_TO_BATCH_ND: - return ConvertSpaceToBatchND(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SPACE_TO_DEPTH: - return ConvertSpaceToDepth(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SPARSE_TO_DENSE: - return ConvertSparseToDense(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SPLIT: - return ConvertSplit(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SQRT: - return ConvertSqrt(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SQUARED_DIFFERENCE: - return ConvertSquaredDifference(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SQUEEZE: - return ConvertSqueeze(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SQUARE: - return ConvertSquare(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_STACK: - return ConvertStack(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_STRIDED_SLICE: - return ConvertStridedSlice(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SUB_FUSION: - return ConvertSubFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_TILE_FUSION: - return ConvertTileFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_TOPK_FUSION: - return ConvertTopKFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_TRANSPOSE: - return ConvertTranspose(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_UNSQUEEZE: - return ConvertUnsqueeze(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_UNSTACK: - return ConvertUnstack(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_WHERE: - return ConvertWhere(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SELECT: - 
return ConvertSelect(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ERF: - return ConvertErf(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LOG_SOFTMAX: - return ConvertLogSoftmax(primitive); - break; - default: - return {}; + if (convertOpMap.find(type) != convertOpMap.end()) { + return convertOpMap[type](primitive); } + return {}; } inline std::vector MindIR_Tensor_GetQuantParams_OHOS(TensorPtr tensor) -- Gitee From 5688d4f5f0557f360920ad36b27300c5b3e6c49a Mon Sep 17 00:00:00 2001 From: wang-yangsong Date: Thu, 11 Apr 2024 15:35:11 +0800 Subject: [PATCH 2/6] optimize switch-case Signed-off-by: wang-yangsong --- .../neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp index 8e24005..0cd5b5f 100644 --- a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp +++ b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp @@ -17,7 +17,6 @@ #include #include #include -#include #include "common/log.h" #include "message_parcel.h" #include "nnrt/v2_1/nnrt_types.h" @@ -30,7 +29,6 @@ typedef void *TensorPtr; namespace OHOS { namespace NeuralNetworkRuntime { namespace NNRt_V2_1 { -//std::unordered_map(*)(PrimitivePtr)> convertOpMap = {{NODE_TYPE_ACTIVATION, &ConvertActivation}}; std::vector ConvertActivation(PrimitivePtr primitive) { if (primitive == nullptr) { -- Gitee From 02ffdf3e8cfdf49df92d9cc61a41c0431d7ff193 Mon Sep 17 00:00:00 2001 From: wang-yangsong Date: Mon, 15 Apr 2024 15:39:35 +0800 Subject: [PATCH 3/6] optimize switch-case Signed-off-by: wang-yangsong --- .../lite_graph_to_hdi_model_v1_0.cpp | 201 +++++------------- .../lite_graph_to_hdi_model_v2_0.cpp | 201 +++++------------- .../lite_graph_to_hdi_model_v2_1.cpp | 1 + 3 files changed, 109 insertions(+), 294 deletions(-) diff --git a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v1_0.cpp b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v1_0.cpp index 2e5ef3c..fe062c4 100644 --- a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v1_0.cpp +++ b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v1_0.cpp @@ -877,156 +877,63 @@ std::vector ConvertUnsqueeze(PrimitivePtr primitive) return ret; } +std::unordered_map(*)(PrimitivePtr)> convertOpMap = { + {NODE_TYPE_ACTIVATION, &ConvertActivation}, + {NODE_TYPE_ADD_FUSION, &ConvertAddFusion}, + {NODE_TYPE_ARGMAX_FUSION, &ConvertArgMaxFusion}, + {NODE_TYPE_AVGPOOL_FUSION, &ConvertAvgPoolFusion}, + {NODE_TYPE_BATCH_TO_SPACE_ND, &ConvertBatchToSpaceND}, + {NODE_TYPE_BIAS_ADD, &ConvertBiasAdd}, + {NODE_TYPE_CAST, &ConvertCast}, + {NODE_TYPE_CONCAT, &ConvertConcat}, + {NODE_TYPE_CONV2D_FUSION, &ConvertConv2DFusion}, + {NODE_TYPE_CONV2D_TRANSPOSE_FUSION, &ConvertConv2dTransposeFusion}, + {NODE_TYPE_DIV_FUSION, &ConvertDivFusion}, + {NODE_TYPE_ELTWISE, &ConvertEltwise}, + {NODE_TYPE_EXPAND_DIMS, &ConvertExpandDims}, + {NODE_TYPE_FILL, &ConvertFill}, + {NODE_TYPE_FULL_CONNECTION, &ConvertFullConnection}, + {NODE_TYPE_FUSED_BATCH_NORM, &ConvertFusedBatchNorm}, + {NODE_TYPE_GATHER, &ConvertGather}, + {NODE_TYPE_LAYER_NORM_FUSION, &ConvertLayerNormFusion}, + {NODE_TYPE_LESS_EQUAL, &ConvertLessEqual}, + {NODE_TYPE_MATMUL_FUSION, &ConvertMatMulFusion}, + {NODE_TYPE_MAXIMUM, &ConvertMaximum}, + {NODE_TYPE_MAX_POOL_FUSION, &ConvertMaxPoolFusion}, + {NODE_TYPE_MUL_FUSION, 
&ConvertMulFusion}, + {NODE_TYPE_ONE_HOT, &ConvertOneHot}, + {NODE_TYPE_PAD_FUSION, &ConvertPadFusion}, + {NODE_TYPE_POW_FUSION, &ConvertPowFusion}, + {NODE_TYPE_PRELU_FUSION, &ConvertPReLUFusion}, + {NODE_TYPE_QUANT_DTYPE_CAST, &ConvertQuantDTypeCast}, + {NODE_TYPE_REDUCE_FUSION, &ConvertReduceFusion}, + {NODE_TYPE_RESHAPE, &ConvertReshape}, + {NODE_TYPE_RESIZE, &ConvertResize}, + {NODE_TYPE_RSQRT, &ConvertRsqrt}, + {NODE_TYPE_SCALE_FUSION, &ConvertScaleFusion}, + {NODE_TYPE_SHAPE, &ConvertShape}, + {NODE_TYPE_SLICE_FUSION, &ConvertSliceFusion}, + {NODE_TYPE_SOFTMAX, &ConvertSoftmax}, + {NODE_TYPE_SPACE_TO_BATCH_ND, &ConvertSpaceToBatchND}, + {NODE_TYPE_SPLIT, &ConvertSplit}, + {NODE_TYPE_SQRT, &ConvertSqrt}, + {NODE_TYPE_SQUARED_DIFFERENCE, &ConvertSquaredDifference}, + {NODE_TYPE_SQUEEZE, &ConvertSqueeze}, + {NODE_TYPE_STACK, &ConvertStack}, + {NODE_TYPE_STRIDED_SLICE, &ConvertStridedSlice}, + {NODE_TYPE_SUB_FUSION, &ConvertSubFusion}, + {NODE_TYPE_TILE_FUSION, &ConvertTileFusion}, + {NODE_TYPE_TOPK_FUSION, &ConvertTopKFusion}, + {NODE_TYPE_TRANSPOSE, &ConvertTranspose}, + {NODE_TYPE_UNSQUEEZE, &ConvertUnsqueeze}}; + std::vector Convert(NodeType type, PrimitivePtr primitive) { - switch (type) { - case NODE_TYPE_ACTIVATION: - return ConvertActivation(primitive); - break; - case NODE_TYPE_ADD_FUSION: - return ConvertAddFusion(primitive); - break; - case NODE_TYPE_ARGMAX_FUSION: - return ConvertArgMaxFusion(primitive); - break; - case NODE_TYPE_AVGPOOL_FUSION: - return ConvertAvgPoolFusion(primitive); - break; - case NODE_TYPE_BATCH_TO_SPACE_ND: - return ConvertBatchToSpaceND(primitive); - break; - case NODE_TYPE_BIAS_ADD: - return ConvertBiasAdd(primitive); - break; - case NODE_TYPE_CAST: - return ConvertCast(primitive); - break; - case NODE_TYPE_CONCAT: - return ConvertConcat(primitive); - break; - case NODE_TYPE_CONV2D_FUSION: - return ConvertConv2DFusion(primitive); - break; - case NODE_TYPE_CONV2D_TRANSPOSE_FUSION: - return ConvertConv2dTransposeFusion(primitive); - break; - case NODE_TYPE_DIV_FUSION: - return ConvertDivFusion(primitive); - break; - case NODE_TYPE_ELTWISE: - return ConvertEltwise(primitive); - break; - case NODE_TYPE_EXPAND_DIMS: - return ConvertExpandDims(primitive); - break; - case NODE_TYPE_FILL: - return ConvertFill(primitive); - break; - case NODE_TYPE_FULL_CONNECTION: - return ConvertFullConnection(primitive); - break; - case NODE_TYPE_FUSED_BATCH_NORM: - return ConvertFusedBatchNorm(primitive); - break; - case NODE_TYPE_GATHER: - return ConvertGather(primitive); - break; - case NODE_TYPE_LAYER_NORM_FUSION: - return ConvertLayerNormFusion(primitive); - break; - case NODE_TYPE_LESS_EQUAL: - return ConvertLessEqual(primitive); - break; - case NODE_TYPE_MATMUL_FUSION: - return ConvertMatMulFusion(primitive); - break; - case NODE_TYPE_MAXIMUM: - return ConvertMaximum(primitive); - break; - case NODE_TYPE_MAX_POOL_FUSION: - return ConvertMaxPoolFusion(primitive); - break; - case NODE_TYPE_MUL_FUSION: - return ConvertMulFusion(primitive); - break; - case NODE_TYPE_ONE_HOT: - return ConvertOneHot(primitive); - break; - case NODE_TYPE_PAD_FUSION: - return ConvertPadFusion(primitive); - break; - case NODE_TYPE_POW_FUSION: - return ConvertPowFusion(primitive); - break; - case NODE_TYPE_PRELU_FUSION: - return ConvertPReLUFusion(primitive); - break; - case NODE_TYPE_QUANT_DTYPE_CAST: - return ConvertQuantDTypeCast(primitive); - break; - case NODE_TYPE_REDUCE_FUSION: - return ConvertReduceFusion(primitive); - break; - case NODE_TYPE_RESHAPE: - return 
ConvertReshape(primitive); - break; - case NODE_TYPE_RESIZE: - return ConvertResize(primitive); - break; - case NODE_TYPE_RSQRT: - return ConvertRsqrt(primitive); - break; - case NODE_TYPE_SCALE_FUSION: - return ConvertScaleFusion(primitive); - break; - case NODE_TYPE_SHAPE: - return ConvertShape(primitive); - break; - case NODE_TYPE_SLICE_FUSION: - return ConvertSliceFusion(primitive); - break; - case NODE_TYPE_SOFTMAX: - return ConvertSoftmax(primitive); - break; - case NODE_TYPE_SPACE_TO_BATCH_ND: - return ConvertSpaceToBatchND(primitive); - break; - case NODE_TYPE_SPLIT: - return ConvertSplit(primitive); - break; - case NODE_TYPE_SQRT: - return ConvertSqrt(primitive); - break; - case NODE_TYPE_SQUARED_DIFFERENCE: - return ConvertSquaredDifference(primitive); - break; - case NODE_TYPE_SQUEEZE: - return ConvertSqueeze(primitive); - break; - case NODE_TYPE_STACK: - return ConvertStack(primitive); - break; - case NODE_TYPE_STRIDED_SLICE: - return ConvertStridedSlice(primitive); - break; - case NODE_TYPE_SUB_FUSION: - return ConvertSubFusion(primitive); - break; - case NODE_TYPE_TILE_FUSION: - return ConvertTileFusion(primitive); - break; - case NODE_TYPE_TOPK_FUSION: - return ConvertTopKFusion(primitive); - break; - case NODE_TYPE_TRANSPOSE: - return ConvertTranspose(primitive); - break; - case NODE_TYPE_UNSQUEEZE: - return ConvertUnsqueeze(primitive); - break; - default: - return {}; + if (convertOpMap.find(type) != convertOpMap.end()) { + return convertOpMap[type](primitive); } + LOGE("MindIR_LiteGraph_To_Model v1_0 failed, nodeType invalid, type =%d", type); + return {}; } inline std::vector MindIR_Tensor_GetQuantParams_OHOS(TensorPtr tensor) diff --git a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_0.cpp b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_0.cpp index 4849baf..0e238ff 100644 --- a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_0.cpp +++ b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_0.cpp @@ -878,156 +878,63 @@ std::vector ConvertUnsqueeze(PrimitivePtr primitive) return ret; } +std::unordered_map(*)(PrimitivePtr)> convertOpMap = { + {NODE_TYPE_ACTIVATION, &ConvertActivation}, + {NODE_TYPE_ADD_FUSION, &ConvertAddFusion}, + {NODE_TYPE_ARGMAX_FUSION, &ConvertArgMaxFusion}, + {NODE_TYPE_AVGPOOL_FUSION, &ConvertAvgPoolFusion}, + {NODE_TYPE_BATCH_TO_SPACE_ND, &ConvertBatchToSpaceND}, + {NODE_TYPE_BIAS_ADD, &ConvertBiasAdd}, + {NODE_TYPE_CAST, &ConvertCast}, + {NODE_TYPE_CONCAT, &ConvertConcat}, + {NODE_TYPE_CONV2D_FUSION, &ConvertConv2DFusion}, + {NODE_TYPE_CONV2D_TRANSPOSE_FUSION, &ConvertConv2dTransposeFusion}, + {NODE_TYPE_DIV_FUSION, &ConvertDivFusion}, + {NODE_TYPE_ELTWISE, &ConvertEltwise}, + {NODE_TYPE_EXPAND_DIMS, &ConvertExpandDims}, + {NODE_TYPE_FILL, &ConvertFill}, + {NODE_TYPE_FULL_CONNECTION, &ConvertFullConnection}, + {NODE_TYPE_FUSED_BATCH_NORM, &ConvertFusedBatchNorm}, + {NODE_TYPE_GATHER, &ConvertGather}, + {NODE_TYPE_LAYER_NORM_FUSION, &ConvertLayerNormFusion}, + {NODE_TYPE_LESS_EQUAL, &ConvertLessEqual}, + {NODE_TYPE_MATMUL_FUSION, &ConvertMatMulFusion}, + {NODE_TYPE_MAXIMUM, &ConvertMaximum}, + {NODE_TYPE_MAX_POOL_FUSION, &ConvertMaxPoolFusion}, + {NODE_TYPE_MUL_FUSION, &ConvertMulFusion}, + {NODE_TYPE_ONE_HOT, &ConvertOneHot}, + {NODE_TYPE_PAD_FUSION, &ConvertPadFusion}, + {NODE_TYPE_POW_FUSION, &ConvertPowFusion}, + {NODE_TYPE_PRELU_FUSION, &ConvertPReLUFusion}, + {NODE_TYPE_QUANT_DTYPE_CAST, &ConvertQuantDTypeCast}, + {NODE_TYPE_REDUCE_FUSION, &ConvertReduceFusion}, + 
{NODE_TYPE_RESHAPE, &ConvertReshape}, + {NODE_TYPE_RESIZE, &ConvertResize}, + {NODE_TYPE_RSQRT, &ConvertRsqrt}, + {NODE_TYPE_SCALE_FUSION, &ConvertScaleFusion}, + {NODE_TYPE_SHAPE, &ConvertShape}, + {NODE_TYPE_SLICE_FUSION, &ConvertSliceFusion}, + {NODE_TYPE_SOFTMAX, &ConvertSoftmax}, + {NODE_TYPE_SPACE_TO_BATCH_ND, &ConvertSpaceToBatchND}, + {NODE_TYPE_SPLIT, &ConvertSplit}, + {NODE_TYPE_SQRT, &ConvertSqrt}, + {NODE_TYPE_SQUARED_DIFFERENCE, &ConvertSquaredDifference}, + {NODE_TYPE_SQUEEZE, &ConvertSqueeze}, + {NODE_TYPE_STACK, &ConvertStack}, + {NODE_TYPE_STRIDED_SLICE, &ConvertStridedSlice}, + {NODE_TYPE_SUB_FUSION, &ConvertSubFusion}, + {NODE_TYPE_TILE_FUSION, &ConvertTileFusion}, + {NODE_TYPE_TOPK_FUSION, &ConvertTopKFusion}, + {NODE_TYPE_TRANSPOSE, &ConvertTranspose}, + {NODE_TYPE_UNSQUEEZE, &ConvertUnsqueeze}}; + std::vector Convert(NodeType type, PrimitivePtr primitive) { - switch (type) { - case NODE_TYPE_ACTIVATION: - return ConvertActivation(primitive); - break; - case NODE_TYPE_ADD_FUSION: - return ConvertAddFusion(primitive); - break; - case NODE_TYPE_ARGMAX_FUSION: - return ConvertArgMaxFusion(primitive); - break; - case NODE_TYPE_AVGPOOL_FUSION: - return ConvertAvgPoolFusion(primitive); - break; - case NODE_TYPE_BATCH_TO_SPACE_ND: - return ConvertBatchToSpaceND(primitive); - break; - case NODE_TYPE_BIAS_ADD: - return ConvertBiasAdd(primitive); - break; - case NODE_TYPE_CAST: - return ConvertCast(primitive); - break; - case NODE_TYPE_CONCAT: - return ConvertConcat(primitive); - break; - case NODE_TYPE_CONV2D_FUSION: - return ConvertConv2DFusion(primitive); - break; - case NODE_TYPE_CONV2D_TRANSPOSE_FUSION: - return ConvertConv2dTransposeFusion(primitive); - break; - case NODE_TYPE_DIV_FUSION: - return ConvertDivFusion(primitive); - break; - case NODE_TYPE_ELTWISE: - return ConvertEltwise(primitive); - break; - case NODE_TYPE_EXPAND_DIMS: - return ConvertExpandDims(primitive); - break; - case NODE_TYPE_FILL: - return ConvertFill(primitive); - break; - case NODE_TYPE_FULL_CONNECTION: - return ConvertFullConnection(primitive); - break; - case NODE_TYPE_FUSED_BATCH_NORM: - return ConvertFusedBatchNorm(primitive); - break; - case NODE_TYPE_GATHER: - return ConvertGather(primitive); - break; - case NODE_TYPE_LAYER_NORM_FUSION: - return ConvertLayerNormFusion(primitive); - break; - case NODE_TYPE_LESS_EQUAL: - return ConvertLessEqual(primitive); - break; - case NODE_TYPE_MATMUL_FUSION: - return ConvertMatMulFusion(primitive); - break; - case NODE_TYPE_MAXIMUM: - return ConvertMaximum(primitive); - break; - case NODE_TYPE_MAX_POOL_FUSION: - return ConvertMaxPoolFusion(primitive); - break; - case NODE_TYPE_MUL_FUSION: - return ConvertMulFusion(primitive); - break; - case NODE_TYPE_ONE_HOT: - return ConvertOneHot(primitive); - break; - case NODE_TYPE_PAD_FUSION: - return ConvertPadFusion(primitive); - break; - case NODE_TYPE_POW_FUSION: - return ConvertPowFusion(primitive); - break; - case NODE_TYPE_PRELU_FUSION: - return ConvertPReLUFusion(primitive); - break; - case NODE_TYPE_QUANT_DTYPE_CAST: - return ConvertQuantDTypeCast(primitive); - break; - case NODE_TYPE_REDUCE_FUSION: - return ConvertReduceFusion(primitive); - break; - case NODE_TYPE_RESHAPE: - return ConvertReshape(primitive); - break; - case NODE_TYPE_RESIZE: - return ConvertResize(primitive); - break; - case NODE_TYPE_RSQRT: - return ConvertRsqrt(primitive); - break; - case NODE_TYPE_SCALE_FUSION: - return ConvertScaleFusion(primitive); - break; - case NODE_TYPE_SHAPE: - return ConvertShape(primitive); - break; - case 
NODE_TYPE_SLICE_FUSION: - return ConvertSliceFusion(primitive); - break; - case NODE_TYPE_SOFTMAX: - return ConvertSoftmax(primitive); - break; - case NODE_TYPE_SPACE_TO_BATCH_ND: - return ConvertSpaceToBatchND(primitive); - break; - case NODE_TYPE_SPLIT: - return ConvertSplit(primitive); - break; - case NODE_TYPE_SQRT: - return ConvertSqrt(primitive); - break; - case NODE_TYPE_SQUARED_DIFFERENCE: - return ConvertSquaredDifference(primitive); - break; - case NODE_TYPE_SQUEEZE: - return ConvertSqueeze(primitive); - break; - case NODE_TYPE_STACK: - return ConvertStack(primitive); - break; - case NODE_TYPE_STRIDED_SLICE: - return ConvertStridedSlice(primitive); - break; - case NODE_TYPE_SUB_FUSION: - return ConvertSubFusion(primitive); - break; - case NODE_TYPE_TILE_FUSION: - return ConvertTileFusion(primitive); - break; - case NODE_TYPE_TOPK_FUSION: - return ConvertTopKFusion(primitive); - break; - case NODE_TYPE_TRANSPOSE: - return ConvertTranspose(primitive); - break; - case NODE_TYPE_UNSQUEEZE: - return ConvertUnsqueeze(primitive); - break; - default: - return {}; + if (convertOpMap.find(type) != convertOpMap.end()) { + return convertOpMap[type](primitive); } + LOGE("MindIR_LiteGraph_To_Model v2_0 failed, nodeType invalid, type =%d", type); + return {}; } inline std::vector MindIR_Tensor_GetQuantParams_OHOS(TensorPtr tensor) diff --git a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp index 0cd5b5f..2f59aae 100644 --- a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp +++ b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp @@ -1737,6 +1737,7 @@ std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr p if (convertOpMap.find(type) != convertOpMap.end()) { return convertOpMap[type](primitive); } + LOGE("MindIR_LiteGraph_To_Model v2_1 failed, nodeType invalid, type =%d", type); return {}; } -- Gitee From aac70711ac461585818cd82da39e9380822821c8 Mon Sep 17 00:00:00 2001 From: wang-yangsong Date: Mon, 15 Apr 2024 18:57:34 +0800 Subject: [PATCH 4/6] builder fix switch-case Signed-off-by: wang-yangsong --- .../ops/pow_builder.cpp | 20 +++++++++---------- .../neural_network_runtime/ops/pow_builder.h | 4 ++++ 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/frameworks/native/neural_network_runtime/ops/pow_builder.cpp b/frameworks/native/neural_network_runtime/ops/pow_builder.cpp index 20cc8a7..008ddec 100644 --- a/frameworks/native/neural_network_runtime/ops/pow_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/pow_builder.cpp @@ -27,7 +27,10 @@ static const int PARAM_MAX_NUM = 2; static const int SCALAR_LENGTH = 1; static const std::string OP_NAME = "Pow"; -PowBuilder::PowBuilder() {} +PowBuilder::PowBuilder() { + //ParamHashMap[OH_NN_POW_SCALE] = &PowBuilder::SetScale; + //ParamHashMap[OH_NN_POW_SHIFT] = &PowBuilder::SetShift; +} PowBuilder::~PowBuilder() {} @@ -104,16 +107,11 @@ OH_NN_ReturnCode PowBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_POW_SCALE: - returnCode = SetScale(tensor); - break; - case OH_NN_POW_SHIFT: - returnCode = SetShift(tensor); - break; - default: - LOGE("[Pow] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (ParamHashMap.find(tensor->GetType()) != ParamHashMap.end()) { + returnCode = 
(this->*(ParamHashMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Pow] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/pow_builder.h b/frameworks/native/neural_network_runtime/ops/pow_builder.h index 2b58d86..f48612a 100644 --- a/frameworks/native/neural_network_runtime/ops/pow_builder.h +++ b/frameworks/native/neural_network_runtime/ops/pow_builder.h @@ -39,6 +39,10 @@ private: private: float m_scale {1.0f}; float m_shift {0.0f}; + std::unordered_map)> ParamHashMap = { + {OH_NN_POW_SCALE, &PowBuilder::SetScale}, + {OH_NN_POW_SHIFT, &PowBuilder::SetShift} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime -- Gitee From 94a636f1b375db42fa02f8d1f4d8bb08287bf3e0 Mon Sep 17 00:00:00 2001 From: wang-yangsong Date: Tue, 16 Apr 2024 14:42:58 +0800 Subject: [PATCH 5/6] optimize builder switch-case Signed-off-by: wang-yangsong --- .../ops/add_builder.cpp | 14 +++---- .../neural_network_runtime/ops/add_builder.h | 8 +++- .../ops/all_builder.cpp | 19 ++++----- .../neural_network_runtime/ops/all_builder.h | 8 +++- .../ops/argmax_builder.cpp | 22 +++------- .../ops/argmax_builder.h | 8 ++++ .../ops/assert_builder.cpp | 19 ++++----- .../ops/assert_builder.h | 7 +++- .../ops/batch_to_space_nd_builder.cpp | 16 +++----- .../ops/batch_to_space_nd_builder.h | 6 +++ .../ops/batchnorm_builder.cpp | 12 +++--- .../ops/batchnorm_builder.h | 5 +++ .../ops/broadcast_to_builder.cpp | 17 ++++---- .../ops/broadcast_to_builder.h | 5 +++ .../ops/clip_builder.cpp | 20 ++++------ .../neural_network_runtime/ops/clip_builder.h | 6 +++ .../ops/concat_builder.cpp | 13 +++--- .../ops/concat_builder.h | 5 +++ .../ops/constant_of_shape_builder.cpp | 15 +++---- .../ops/constant_of_shape_builder.h | 6 +++ .../ops/conv2d_builder.cpp | 26 +++--------- .../ops/conv2d_builder.h | 10 +++++ .../ops/conv2d_transpose_builder.cpp | 28 +++---------- .../ops/conv2d_transpose_builder.h | 11 +++++ .../ops/crop_builder.cpp | 20 ++++------ .../neural_network_runtime/ops/crop_builder.h | 6 +++ .../ops/depth_to_space_builder.cpp | 15 +++---- .../ops/depth_to_space_builder.h | 6 +++ .../ops/depthwise_conv2d_native_builder.cpp | 23 +++-------- .../ops/depthwise_conv2d_native_builder.h | 9 +++++ .../ops/detection_post_process_builder.cpp | 39 +++--------------- .../ops/detection_post_process_builder.h | 15 +++++++ .../ops/div_builder.cpp | 13 +++--- .../neural_network_runtime/ops/div_builder.h | 5 +++ .../ops/eltwise_builder.cpp | 13 +++--- .../ops/eltwise_builder.h | 5 +++ .../ops/exp_builder.cpp | 18 +++------ .../neural_network_runtime/ops/exp_builder.h | 7 ++++ .../ops/flatten_builder.cpp | 12 +++--- .../ops/flatten_builder.h | 5 +++ .../ops/fullconnection_builder.cpp | 22 +++------- .../ops/fullconnection_builder.h | 8 ++++ .../ops/gelu_builder.cpp | 13 +++--- .../neural_network_runtime/ops/gelu_builder.h | 5 +++ .../ops/instance_norm_builder.cpp | 12 +++--- .../ops/instance_norm_builder.h | 5 +++ .../ops/l2_normalize_builder.cpp | 18 +++------ .../ops/l2_normalize_builder.h | 7 ++++ .../ops/layernorm_builder.cpp | 18 +++------ .../ops/layernorm_builder.h | 7 ++++ .../ops/leaky_relu_builder.cpp | 12 +++--- .../ops/leaky_relu_builder.h | 5 +++ .../ops/log_softmax_builder.cpp | 14 +++---- .../ops/log_softmax_builder.h | 7 +++- .../ops/lrn_builder.cpp | 24 +++-------- .../neural_network_runtime/ops/lrn_builder.h | 9 +++++ .../ops/lstm_builder.cpp | 40 +++---------------- 
.../neural_network_runtime/ops/lstm_builder.h | 14 +++++++ .../ops/matmul_builder.cpp | 18 +++------ .../ops/matmul_builder.h | 7 ++++ .../neural_network_runtime/ops/mul_builder.h | 5 +++ .../ops/onehot_builder.cpp | 12 +++--- .../ops/onehot_builder.h | 5 +++ .../ops/pad_builder.cpp | 15 +++---- .../neural_network_runtime/ops/pad_builder.h | 6 +++ .../ops/pooling_builder.cpp | 36 +++-------------- .../ops/pooling_builder.h | 20 ++++++++++ .../ops/pow_builder.cpp | 9 ++--- .../neural_network_runtime/ops/pow_builder.h | 4 +- .../ops/quant_dtype_cast_builder.cpp | 18 +++------ .../ops/quant_dtype_cast_builder.h | 7 ++++ .../ops/range_builder.cpp | 18 +++------ .../ops/range_builder.h | 7 ++++ .../ops/reduceL2_builder.cpp | 18 +++------ .../ops/reduceL2_builder.h | 7 ++++ .../ops/reduceall_builder.cpp | 18 +++------ .../ops/reduceall_builder.h | 7 ++++ .../ops/reducemax_builder.cpp | 18 +++------ .../ops/reducemax_builder.h | 7 ++++ .../ops/reducemean_builder.cpp | 18 +++------ .../ops/reducemean_builder.h | 7 ++++ .../ops/reducemin_builder.cpp | 18 +++------ .../ops/reducemin_builder.h | 7 ++++ .../ops/reduceprod_builder.cpp | 18 +++------ .../ops/reduceprod_builder.h | 7 ++++ .../ops/reducesum_builder.cpp | 18 +++------ .../ops/reducesum_builder.h | 7 ++++ .../ops/resize_bilinear_builder.cpp | 24 +++-------- .../ops/resize_bilinear_builder.h | 9 +++++ .../ops/scale_builder.cpp | 15 +++---- .../ops/scale_builder.h | 6 +++ .../ops/slice_builder.cpp | 13 +++--- .../ops/slice_builder.h | 5 +++ .../ops/softmax_builder.cpp | 12 +++--- .../ops/softmax_builder.h | 5 +++ .../ops/space_to_batch_nd_builder.cpp | 15 +++---- .../ops/space_to_batch_nd_builder.h | 6 +++ .../ops/space_to_depth_builder.cpp | 12 +++--- .../ops/space_to_depth_builder.h | 5 +++ .../ops/split_builder.cpp | 18 +++------ .../ops/split_builder.h | 7 ++++ .../ops/squeeze_builder.cpp | 12 +++--- .../ops/squeeze_builder.h | 5 +++ .../ops/stack_builder.cpp | 12 +++--- .../ops/stack_builder.h | 5 +++ .../ops/strided_slice_builder.cpp | 24 +++-------- .../ops/strided_slice_builder.h | 9 +++++ .../ops/sub_builder.cpp | 12 +++--- .../neural_network_runtime/ops/sub_builder.h | 5 +++ .../ops/tile_builder.cpp | 13 +++--- .../neural_network_runtime/ops/tile_builder.h | 5 +++ .../ops/top_k_builder.cpp | 15 +++---- .../ops/top_k_builder.h | 6 +++ .../ops/unsqueeze_builder.cpp | 12 +++--- .../ops/unsqueeze_builder.h | 5 +++ .../ops/unstack_builder.cpp | 12 +++--- .../ops/unstack_builder.h | 5 +++ 117 files changed, 717 insertions(+), 711 deletions(-) diff --git a/frameworks/native/neural_network_runtime/ops/add_builder.cpp b/frameworks/native/neural_network_runtime/ops/add_builder.cpp index 431733a..3b5b6e3 100644 --- a/frameworks/native/neural_network_runtime/ops/add_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/add_builder.cpp @@ -30,7 +30,7 @@ AddBuilder::AddBuilder() {} AddBuilder::~AddBuilder() {} -OH_NN_ReturnCode AddBuilder::SetActivation(std::shared_ptr& tensor) +OH_NN_ReturnCode AddBuilder::SetActivation(std::shared_ptr tensor) { tensor->IdentifyOpParameter(); @@ -81,13 +81,11 @@ OH_NN_ReturnCode AddBuilder::Build(const std::vector& paramsIndex, for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_ADD_ACTIVATIONTYPE: - ret = SetActivation(tensor); - break; - default: - LOGE("[Add] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = 
(this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Add] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/add_builder.h b/frameworks/native/neural_network_runtime/ops/add_builder.h index 1b650ea..067ab73 100644 --- a/frameworks/native/neural_network_runtime/ops/add_builder.h +++ b/frameworks/native/neural_network_runtime/ops/add_builder.h @@ -20,12 +20,13 @@ #include "ops_builder.h" #include "ops_registry.h" - namespace OHOS { namespace NeuralNetworkRuntime { namespace Ops { class AddBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(AddBuilder::*FuncPtr)(std::shared_ptr); + AddBuilder(); ~AddBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,10 +37,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetActivation(std::shared_ptr& tensor); + OH_NN_ReturnCode SetActivation(std::shared_ptr tensor); private: mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + std::unordered_map m_paramMap = { + {OH_NN_ADD_ACTIVATIONTYPE, &AddBuilder::SetActivation} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/all_builder.cpp b/frameworks/native/neural_network_runtime/ops/all_builder.cpp index 63ac02b..bdeb03c 100644 --- a/frameworks/native/neural_network_runtime/ops/all_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/all_builder.cpp @@ -28,7 +28,7 @@ AllBuilder::AllBuilder() {} AllBuilder::~AllBuilder() {} -OH_NN_ReturnCode AllBuilder::SetKeepDims(std::shared_ptr& tensor) +OH_NN_ReturnCode AllBuilder::SetKeepDims(std::shared_ptr tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[All] The keep_dims should be type OH_NN_INT64."); @@ -75,22 +75,19 @@ OH_NN_ReturnCode AllBuilder::Build(const std::vector& paramsIndex, return ret; } - OH_NN_ReturnCode returnCode; for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_ALL_KEEP_DIMS: - returnCode = SetKeepDims(tensor); - break; - default: - LOGE("[All] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[All] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } - if (returnCode != OH_NN_SUCCESS) { + if (ret != OH_NN_SUCCESS) { LOGE("[All] Build failed, passed invalid param."); - return returnCode; + return ret; } } diff --git a/frameworks/native/neural_network_runtime/ops/all_builder.h b/frameworks/native/neural_network_runtime/ops/all_builder.h index e43ff1b..0c1a006 100644 --- a/frameworks/native/neural_network_runtime/ops/all_builder.h +++ b/frameworks/native/neural_network_runtime/ops/all_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class AllBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(AllBuilder::*FuncPtr)(std::shared_ptr); + AllBuilder(); ~AllBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,10 +38,14 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetKeepDims(std::shared_ptr& tensor); + OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); private: int64_t 
m_keepDims {0}; + std::unordered_map m_paramMap = { + {OH_NN_ALL_KEEP_DIMS, &AllBuilder::SetKeepDims} + }; + }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/argmax_builder.cpp b/frameworks/native/neural_network_runtime/ops/argmax_builder.cpp index 5d11dce..8e032a3 100644 --- a/frameworks/native/neural_network_runtime/ops/argmax_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/argmax_builder.cpp @@ -134,23 +134,13 @@ OH_NN_ReturnCode ArgMaxBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { const std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_ARG_MAX_AXIS: - returnCode = SetAxis(tensor); - break; - case OH_NN_ARG_MAX_TOP_K: - returnCode = SetTopK(tensor); - break; - case OH_NN_ARG_MAX_KEEPDIMS: - returnCode = SetKeepdims(tensor); - break; - case OH_NN_ARG_MAX_OUT_MAX_VALUE: - returnCode = SetOutMaxValue(tensor); - break; - default: - LOGE("[ArgMax] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ArgMax] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[ArgMax] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/argmax_builder.h b/frameworks/native/neural_network_runtime/ops/argmax_builder.h index 997088a..0e7992e 100644 --- a/frameworks/native/neural_network_runtime/ops/argmax_builder.h +++ b/frameworks/native/neural_network_runtime/ops/argmax_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ArgMaxBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(ArgMaxBuilder::*FuncPtr)(std::shared_ptr); + ArgMaxBuilder(); ~ArgMaxBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -45,6 +47,12 @@ private: int64_t m_topK {1}; bool m_keepDims {false}; bool m_outMaxValue {false}; + std::unordered_map m_paramMap = { + {OH_NN_ARG_MAX_AXIS, &ArgMaxBuilder::SetAxis}, + {OH_NN_ARG_MAX_KEEPDIMS, &ArgMaxBuilder::SetKeepdims}, + {OH_NN_ARG_MAX_TOP_K, &ArgMaxBuilder::SetTopK}, + {OH_NN_ARG_MAX_OUT_MAX_VALUE, &ArgMaxBuilder::SetOutMaxValue} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/assert_builder.cpp b/frameworks/native/neural_network_runtime/ops/assert_builder.cpp index b185e19..5f50532 100644 --- a/frameworks/native/neural_network_runtime/ops/assert_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/assert_builder.cpp @@ -28,7 +28,7 @@ AssertBuilder::AssertBuilder() {} AssertBuilder::~AssertBuilder() {} -OH_NN_ReturnCode AssertBuilder::SetSummarize(std::shared_ptr& tensor) +OH_NN_ReturnCode AssertBuilder::SetSummarize(std::shared_ptr tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[Assert] The summarize should be type OH_NN_INT64."); @@ -75,22 +75,19 @@ OH_NN_ReturnCode AssertBuilder::Build(const std::vector& paramsIndex, return ret; } - OH_NN_ReturnCode returnCode; for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_ASSERT_SUMMARIZE: - returnCode = SetSummarize(tensor); - break; - default: - LOGE("[Assert] Build failed, param invalid, type=%d", tensor->GetType()); - 
return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Assert] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } - if (returnCode != OH_NN_SUCCESS) { + if (ret != OH_NN_SUCCESS) { LOGE("[Assert] Build failed, passed invalid param."); - return returnCode; + return ret; } } diff --git a/frameworks/native/neural_network_runtime/ops/assert_builder.h b/frameworks/native/neural_network_runtime/ops/assert_builder.h index 7f4189b..062aea0 100644 --- a/frameworks/native/neural_network_runtime/ops/assert_builder.h +++ b/frameworks/native/neural_network_runtime/ops/assert_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class AssertBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(AssertBuilder::*FuncPtr)(std::shared_ptr); + AssertBuilder(); ~AssertBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,10 +38,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetSummarize(std::shared_ptr& tensor); + OH_NN_ReturnCode SetSummarize(std::shared_ptr tensor); private: int64_t m_summarize {0}; + std::unordered_map m_paramMap = { + {OH_NN_ASSERT_SUMMARIZE, &AssertBuilder::SetSummarize} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.cpp b/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.cpp index 1e6ced6..a96239b 100644 --- a/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.cpp @@ -109,17 +109,13 @@ OH_NN_ReturnCode BatchToSpaceNDBuilder::Build(const std::vector& param for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE: - returnCode = SetInputBlock(tensor); - break; - case OH_NN_BATCH_TO_SPACE_ND_CROPS: - returnCode = SetInputCrops(tensor); - break; - default: - LOGE("[BatchToSpaceND] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[BatchToSpaceND] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[BatchToSpaceND] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.h b/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.h index 1675c9b..4e10a5f 100644 --- a/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.h +++ b/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class BatchToSpaceNDBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(BatchToSpaceNDBuilder::*FuncPtr)(std::shared_ptr); + BatchToSpaceNDBuilder(); ~BatchToSpaceNDBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -45,6 +47,10 @@ private: private: std::vector m_blockSize; std::vector> m_crops; + std::unordered_map m_paramMap = { + {OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE, &BatchToSpaceNDBuilder::SetInputBlock}, + {OH_NN_BATCH_TO_SPACE_ND_CROPS, 
&BatchToSpaceNDBuilder::SetInputCrops} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/batchnorm_builder.cpp b/frameworks/native/neural_network_runtime/ops/batchnorm_builder.cpp index ee05ea4..2364b20 100644 --- a/frameworks/native/neural_network_runtime/ops/batchnorm_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/batchnorm_builder.cpp @@ -82,13 +82,11 @@ OH_NN_ReturnCode BatchNormBuilder::Build(const std::vector& paramsInde for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_BATCH_NORM_EPSILON: - returnCode = SetEpsilon(tensor); - break; - default: - LOGE("[BatchNorm] Parameter Type is invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[BatchNorm] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/batchnorm_builder.h b/frameworks/native/neural_network_runtime/ops/batchnorm_builder.h index ec9ed36..ee395c0 100644 --- a/frameworks/native/neural_network_runtime/ops/batchnorm_builder.h +++ b/frameworks/native/neural_network_runtime/ops/batchnorm_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class BatchNormBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(BatchNormBuilder::*FuncPtr)(std::shared_ptr); + BatchNormBuilder(); ~BatchNormBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,6 +38,9 @@ private: private: float m_epsilon {0.0001f}; + std::unordered_map m_paramMap = { + {OH_NN_BATCH_NORM_EPSILON, &BatchNormBuilder::SetEpsilon} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.cpp b/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.cpp index 132cd8c..3fe4664 100755 --- a/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.cpp @@ -77,22 +77,19 @@ OH_NN_ReturnCode BroadcastToBuilder::Build(const std::vector& paramsIn return ret; } - OH_NN_ReturnCode returnCode; for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_BROADCAST_TO_SHAPE: - returnCode = SetShape(tensor); - break; - default: - LOGE("[BroadcastTo] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[BroadcastTo] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } - if (returnCode != OH_NN_SUCCESS) { + if (ret != OH_NN_SUCCESS) { LOGE("[BroadcastTo] Build failed, passed invalid param."); - return returnCode; + return ret; } } diff --git a/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.h b/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.h index 92eea4c..297b310 100755 --- a/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.h +++ b/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace 
Ops { class BroadcastToBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(BroadcastToBuilder::*FuncPtr)(std::shared_ptr); + BroadcastToBuilder(); ~BroadcastToBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -40,6 +42,9 @@ private: private: std::vector m_shape; + std::unordered_map m_paramMap = { + {OH_NN_BROADCAST_TO_SHAPE, &BroadcastToBuilder::SetShape} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/clip_builder.cpp b/frameworks/native/neural_network_runtime/ops/clip_builder.cpp index 2f5257d..766bce4 100644 --- a/frameworks/native/neural_network_runtime/ops/clip_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/clip_builder.cpp @@ -97,25 +97,19 @@ OH_NN_ReturnCode ClipBuilder::Build(const std::vector& paramsIndex, return ret; } - OH_NN_ReturnCode returnCode; for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_CLIP_MAX: - returnCode = SetMax(tensor); - break; - case OH_NN_CLIP_MIN: - returnCode = SetMin(tensor); - break; - default: - LOGE("[Clip] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Clip] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } - if (returnCode != OH_NN_SUCCESS) { + if (ret != OH_NN_SUCCESS) { LOGE("[Clip] Build failed, passed invalid param."); - return returnCode; + return ret; } } diff --git a/frameworks/native/neural_network_runtime/ops/clip_builder.h b/frameworks/native/neural_network_runtime/ops/clip_builder.h index 09c1872..161a6e7 100644 --- a/frameworks/native/neural_network_runtime/ops/clip_builder.h +++ b/frameworks/native/neural_network_runtime/ops/clip_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ClipBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(ClipBuilder::*FuncPtr)(std::shared_ptr); + ClipBuilder(); ~ClipBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -42,6 +44,10 @@ private: private: float m_max {0.0f}; float m_min {0.0f}; + std::unordered_map m_paramMap = { + {OH_NN_CLIP_MAX, &ClipBuilder::SetMax}, + {OH_NN_CLIP_MIN, &ClipBuilder::SetMin} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/concat_builder.cpp b/frameworks/native/neural_network_runtime/ops/concat_builder.cpp index 7e08da0..b478514 100644 --- a/frameworks/native/neural_network_runtime/ops/concat_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/concat_builder.cpp @@ -85,14 +85,13 @@ OH_NN_ReturnCode ConcatBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_CONCAT_AXIS: - returnCode = SetAxis(tensor); - break; - default: - LOGE("[Concat] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Concat] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[Concat] Build failed, passed invalid param."); 
return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/concat_builder.h b/frameworks/native/neural_network_runtime/ops/concat_builder.h index 7d36a04..3d28914 100644 --- a/frameworks/native/neural_network_runtime/ops/concat_builder.h +++ b/frameworks/native/neural_network_runtime/ops/concat_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ConcatBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(ConcatBuilder::*FuncPtr)(std::shared_ptr); + ConcatBuilder(); ~ConcatBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -42,6 +44,9 @@ private: const std::vector>& allTensors); private: int64_t m_axis{0}; + std::unordered_map m_paramMap = { + {OH_NN_CONCAT_AXIS, &ConcatBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.cpp b/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.cpp index d80bc09..aaf73d1 100755 --- a/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.cpp @@ -103,16 +103,11 @@ OH_NN_ReturnCode ConstantOfShapeBuilder::Build(const std::vector& para for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE: - ret = SetDataType(tensor); - break; - case OH_NN_CONSTANT_OF_SHAPE_VALUE: - ret = SetValue(tensor); - break; - default: - LOGE("[ConstantOfShape] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ConstantOfShape] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.h b/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.h index 0aff934..08c3395 100755 --- a/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.h +++ b/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ConstantOfShapeBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(ConstantOfShapeBuilder::*FuncPtr)(std::shared_ptr); + ConstantOfShapeBuilder(); ~ConstantOfShapeBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -42,6 +44,10 @@ private: private: int64_t m_dataType {0}; std::vector m_value; + std::unordered_map m_paramMap = { + {OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE, &ConstantOfShapeBuilder::SetDataType}, + {OH_NN_CONSTANT_OF_SHAPE_VALUE, &ConstantOfShapeBuilder::SetValue} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/conv2d_builder.cpp b/frameworks/native/neural_network_runtime/ops/conv2d_builder.cpp index 4479204..9a1a2b7 100644 --- a/frameworks/native/neural_network_runtime/ops/conv2d_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/conv2d_builder.cpp @@ -252,27 +252,13 @@ OH_NN_ReturnCode Conv2DBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_CONV2D_STRIDES: - returnCode = 
SetStrides(tensor); - break; - case OH_NN_CONV2D_DILATION: - returnCode = SetDilation(tensor); - break; - case OH_NN_CONV2D_PAD_MODE: - case OH_NN_CONV2D_PAD: - returnCode = SetPad(tensor); - break; - case OH_NN_CONV2D_GROUP: - returnCode = SetGroup(tensor); - break; - case OH_NN_CONV2D_ACTIVATION_TYPE: - returnCode = SetActavitation(tensor); - break; - default: - LOGE("[Conv2D] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Conv2D] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[Conv2D] Build failed, Passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/conv2d_builder.h b/frameworks/native/neural_network_runtime/ops/conv2d_builder.h index 5b89b47..5f74688 100644 --- a/frameworks/native/neural_network_runtime/ops/conv2d_builder.h +++ b/frameworks/native/neural_network_runtime/ops/conv2d_builder.h @@ -25,6 +25,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class Conv2DBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(Conv2DBuilder::*FuncPtr)(std::shared_ptr); + Conv2DBuilder(); ~Conv2DBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -58,6 +60,14 @@ private: std::vector m_dilation; mindspore::lite::PadMode m_padMode {mindspore::lite::PAD_MODE_PAD}; mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + std::unordered_map m_paramMap = { + {OH_NN_CONV2D_STRIDES, &Conv2DBuilder::SetStrides}, + {OH_NN_CONV2D_PAD, &Conv2DBuilder::SetPad}, + {OH_NN_CONV2D_DILATION, &Conv2DBuilder::SetDilation}, + {OH_NN_CONV2D_PAD_MODE, &Conv2DBuilder::SetPad}, + {OH_NN_CONV2D_ACTIVATION_TYPE, &Conv2DBuilder::SetActavitation}, + {OH_NN_CONV2D_GROUP, &Conv2DBuilder::SetGroup} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.cpp b/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.cpp index 460d65b..2b10933 100644 --- a/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.cpp @@ -262,29 +262,11 @@ OH_NN_ReturnCode Conv2DTransposeBuilder::Build(const std::vector& para for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; // 参数 tensor - switch (tensor->GetType()) { - case OH_NN_CONV2D_TRANSPOSE_STRIDES: - returnCode = SetStrides(tensor); - break; - case OH_NN_CONV2D_TRANSPOSE_DILATION: - returnCode = SetDilation(tensor); - break; - case OH_NN_CONV2D_TRANSPOSE_PAD_MODE: - case OH_NN_CONV2D_TRANSPOSE_PAD: - returnCode = SetPad(tensor); - break; - case OH_NN_CONV2D_TRANSPOSE_GROUP: - returnCode = SetGroup(tensor); - break; - case OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS: - returnCode = SetOutPadding(tensor); - break; - case OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE: - returnCode = SetActivation(tensor); - break; - default: - LOGE("[Conv2DTranspose] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Conv2DTranspose] Build failed, param invalid, type=%d", tensor->GetType()); + return 
OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.h b/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.h index a84c280..809b7f4 100644 --- a/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.h +++ b/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.h @@ -25,6 +25,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class Conv2DTransposeBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(Conv2DTransposeBuilder::*FuncPtr)(std::shared_ptr); + Conv2DTransposeBuilder(); ~Conv2DTransposeBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -58,6 +60,15 @@ private: std::vector m_outputPaddings; mindspore::lite::PadMode m_padMode{mindspore::lite::PAD_MODE_PAD}; mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + std::unordered_map m_paramMap = { + {OH_NN_CONV2D_TRANSPOSE_STRIDES, &Conv2DTransposeBuilder::SetStrides}, + {OH_NN_CONV2D_TRANSPOSE_PAD, &Conv2DTransposeBuilder::SetPad}, + {OH_NN_CONV2D_TRANSPOSE_DILATION, &Conv2DTransposeBuilder::SetDilation}, + {OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS, &Conv2DTransposeBuilder::SetOutPadding}, + {OH_NN_CONV2D_TRANSPOSE_PAD_MODE, &Conv2DTransposeBuilder::SetPad}, + {OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE, &Conv2DTransposeBuilder::SetActivation}, + {OH_NN_CONV2D_TRANSPOSE_GROUP, &Conv2DTransposeBuilder::SetGroup} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/crop_builder.cpp b/frameworks/native/neural_network_runtime/ops/crop_builder.cpp index 0baa083..a5a5e34 100644 --- a/frameworks/native/neural_network_runtime/ops/crop_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/crop_builder.cpp @@ -103,25 +103,19 @@ OH_NN_ReturnCode CropBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; - OH_NN_ReturnCode returnCode; for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_CROP_AXIS: - returnCode = SetAxis(tensor); - break; - case OH_NN_CROP_OFFSET: - returnCode = SetOffset(tensor); - break; - default: - LOGE("[Crop] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Crop] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } - if (returnCode != OH_NN_SUCCESS) { + if (ret != OH_NN_SUCCESS) { LOGE("[Crop] Build failed, passed invalid param."); - return returnCode; + return ret; } } diff --git a/frameworks/native/neural_network_runtime/ops/crop_builder.h b/frameworks/native/neural_network_runtime/ops/crop_builder.h index feee26b..125eefd 100644 --- a/frameworks/native/neural_network_runtime/ops/crop_builder.h +++ b/frameworks/native/neural_network_runtime/ops/crop_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class CropBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(CropBuilder::*FuncPtr)(std::shared_ptr); + CropBuilder(); ~CropBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -39,6 +41,10 @@ private: private: int64_t m_axis {0}; std::vector m_offset; + std::unordered_map m_paramMap = { + {OH_NN_CROP_AXIS, 
&CropBuilder::SetAxis}, + {OH_NN_CROP_OFFSET, &CropBuilder::SetOffset} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.cpp b/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.cpp index 561caef..8c705a1 100755 --- a/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.cpp @@ -108,16 +108,11 @@ OH_NN_ReturnCode DepthToSpaceBuilder::Build(const std::vector& paramsI for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE: - ret = SetBlockSize(tensor); - break; - case OH_NN_DEPTH_TO_SPACE_MODE: - ret = SetMode(tensor); - break; - default: - LOGE("[DepthToSpace] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[DepthToSpace] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.h b/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.h index 5ecc071..2fdbc0d 100755 --- a/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.h +++ b/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class DepthToSpaceBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(DepthToSpaceBuilder::*FuncPtr)(std::shared_ptr); + DepthToSpaceBuilder(); ~DepthToSpaceBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -42,6 +44,10 @@ private: private: int64_t m_blockSize {0}; std::string m_mode; + std::unordered_map m_paramMap = { + {OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE, &DepthToSpaceBuilder::SetBlockSize}, + {OH_NN_DEPTH_TO_SPACE_MODE, &DepthToSpaceBuilder::SetMode} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.cpp b/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.cpp index 93db5eb..eab33ca 100644 --- a/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.cpp @@ -233,24 +233,13 @@ OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::Build(const std::vector for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; // 参数 tensor - switch (tensor->GetType()) { - case OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES: - ret = SetStrides(tensor); - break; - case OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION: - ret = SetDilation(tensor); - break; - case OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE: - case OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD: - ret = SetPadModeOrPaddings(tensor); - break; - case OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE: - ret = SetActivation(tensor); - break; - default: - LOGE("[DepthwiseConv2DNative] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[DepthwiseConv2DNative] Build failed, param invalid, type=%d", 
tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (ret != OH_NN_SUCCESS) { LOGE("[DepthwiseConv2DNative] Build failed, passed invalid param."); return ret; diff --git a/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.h b/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.h index df8879d..60b2e08 100644 --- a/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.h +++ b/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.h @@ -25,6 +25,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class DepthwiseConv2DNativeBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(DepthwiseConv2DNativeBuilder::*FuncPtr)(std::shared_ptr); + DepthwiseConv2DNativeBuilder(); ~DepthwiseConv2DNativeBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -54,6 +56,13 @@ private: std::vector m_dilation; mindspore::lite::PadMode m_padMode{mindspore::lite::PAD_MODE_PAD}; mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + std::unordered_map m_paramMap = { + {OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES, &DepthwiseConv2DNativeBuilder::SetStrides}, + {OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD, &DepthwiseConv2DNativeBuilder::SetPadModeOrPaddings}, + {OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION, &DepthwiseConv2DNativeBuilder::SetDilation}, + {OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE, &DepthwiseConv2DNativeBuilder::SetPadModeOrPaddings}, + {OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE, &DepthwiseConv2DNativeBuilder::SetActivation} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.cpp b/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.cpp index 627d49b..0617955 100644 --- a/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.cpp @@ -287,40 +287,11 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::Build(const std::vector& for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE: - ret = SetInputSize(tensor); - break; - case OH_NN_DETECTION_POST_PROCESS_SCALE: - ret = SetScale(tensor); - break; - case OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD: - ret = SetNmsIoUThreshold(tensor); - break; - case OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD: - ret = SetNmsScoreThreshold(tensor); - break; - case OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS: - ret = SetMaxDetections(tensor); - break; - case OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS: - ret = SetDetectionsPerClass(tensor); - break; - case OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION: - ret = SetMaxClassesPerDetection(tensor); - break; - case OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES: - ret = SetNumClasses(tensor); - break; - case OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS: - ret = SetUseRegularNms(tensor); - break; - case OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED: - ret = SetOutQuantized(tensor); - break; - default: - LOGE("[DetectionPostProcess] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[DetectionPostProcess] Build failed, param invalid, 
type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.h b/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.h index c1bb8db..658b731 100644 --- a/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.h +++ b/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.h @@ -23,6 +23,9 @@ namespace NeuralNetworkRuntime { namespace Ops { class DetectionPostProcessBuilder : public OpsBuilder { public: + typedef DetectionPostProcessBuilder DPPBuilder; + typedef OH_NN_ReturnCode(DPPBuilder::*FuncPtr)(std::shared_ptr); + DetectionPostProcessBuilder(); ~DetectionPostProcessBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -55,6 +58,18 @@ private: int64_t m_numClasses {0}; bool m_useRegularNms {false}; bool m_outQuantized {false}; + std::unordered_map m_paramMap = { + {OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE, &DPPBuilder::SetInputSize}, + {OH_NN_DETECTION_POST_PROCESS_SCALE, &DPPBuilder::SetScale}, + {OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD, &DPPBuilder::SetNmsIoUThreshold}, + {OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD, &DPPBuilder::SetNmsScoreThreshold}, + {OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS, &DPPBuilder::SetMaxDetections}, + {OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS, &DPPBuilder::SetDetectionsPerClass}, + {OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION, &DPPBuilder::SetMaxClassesPerDetection}, + {OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES, &DPPBuilder::SetNumClasses}, + {OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS, &DPPBuilder::SetUseRegularNms}, + {OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED, &DPPBuilder::SetOutQuantized} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/div_builder.cpp b/frameworks/native/neural_network_runtime/ops/div_builder.cpp index 89227b0..4a45571 100644 --- a/frameworks/native/neural_network_runtime/ops/div_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/div_builder.cpp @@ -87,14 +87,13 @@ OH_NN_ReturnCode DivBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_DIV_ACTIVATIONTYPE: - returnCode = SetActicationType(tensor); - break; - default: - LOGE("[Div] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Div] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[Div] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/div_builder.h b/frameworks/native/neural_network_runtime/ops/div_builder.h index 1c7daaf..be217a9 100644 --- a/frameworks/native/neural_network_runtime/ops/div_builder.h +++ b/frameworks/native/neural_network_runtime/ops/div_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class DivBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(DivBuilder::*FuncPtr)(std::shared_ptr); + DivBuilder(); ~DivBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -40,6 +42,9 @@ private: private: mindspore::lite::ActivationType 
m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + std::unordered_map m_paramMap = { + {OH_NN_DIV_ACTIVATIONTYPE, &DivBuilder::SetActicationType} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/eltwise_builder.cpp b/frameworks/native/neural_network_runtime/ops/eltwise_builder.cpp index 8a77b3d..9b1e6fa 100644 --- a/frameworks/native/neural_network_runtime/ops/eltwise_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/eltwise_builder.cpp @@ -86,14 +86,13 @@ OH_NN_ReturnCode EltwiseBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_ELTWISE_MODE: - returnCode = SetMode(tensor); - break; - default: - LOGE("[Eltwise] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Eltwise] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[Eltwise] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/eltwise_builder.h b/frameworks/native/neural_network_runtime/ops/eltwise_builder.h index 78d4dc5..73d9998 100644 --- a/frameworks/native/neural_network_runtime/ops/eltwise_builder.h +++ b/frameworks/native/neural_network_runtime/ops/eltwise_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class EltwiseBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(EltwiseBuilder::*FuncPtr)(std::shared_ptr); + EltwiseBuilder(); ~EltwiseBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -40,6 +42,9 @@ private: private: mindspore::lite::EltwiseMode m_mode {mindspore::lite::ELTWISE_MODE_PROD}; + std::unordered_map m_paramMap = { + {OH_NN_ELTWISE_MODE, &EltwiseBuilder::SetMode} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/exp_builder.cpp b/frameworks/native/neural_network_runtime/ops/exp_builder.cpp index 4afb105..6706ac1 100755 --- a/frameworks/native/neural_network_runtime/ops/exp_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/exp_builder.cpp @@ -122,19 +122,11 @@ OH_NN_ReturnCode ExpBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_EXP_BASE: - ret = SetBase(tensor); - break; - case OH_NN_EXP_SCALE: - ret = SetScale(tensor); - break; - case OH_NN_EXP_SHIFT: - ret = SetShift(tensor); - break; - default: - LOGE("[Exp] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Exp] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/exp_builder.h b/frameworks/native/neural_network_runtime/ops/exp_builder.h index edacbfc..efd809e 100755 --- a/frameworks/native/neural_network_runtime/ops/exp_builder.h +++ b/frameworks/native/neural_network_runtime/ops/exp_builder.h @@ -26,6 +26,8 @@ namespace 
NeuralNetworkRuntime { namespace Ops { class ExpBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(ExpBuilder::*FuncPtr)(std::shared_ptr); + ExpBuilder(); ~ExpBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -44,6 +46,11 @@ private: float m_base {-1.0f}; float m_scale {1.0f}; float m_shift {0.0f}; + std::unordered_map m_paramMap = { + {OH_NN_EXP_BASE, &ExpBuilder::SetBase}, + {OH_NN_EXP_SCALE, &ExpBuilder::SetScale}, + {OH_NN_EXP_SHIFT, &ExpBuilder::SetShift} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/flatten_builder.cpp b/frameworks/native/neural_network_runtime/ops/flatten_builder.cpp index f9b16c1..ef6c75b 100755 --- a/frameworks/native/neural_network_runtime/ops/flatten_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/flatten_builder.cpp @@ -78,13 +78,11 @@ OH_NN_ReturnCode FlattenBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_FLATTEN_AXIS: - ret = SetAxis(tensor); - break; - default: - LOGE("[Flatten] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Flatten] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/flatten_builder.h b/frameworks/native/neural_network_runtime/ops/flatten_builder.h index 98916b7..b61261c 100755 --- a/frameworks/native/neural_network_runtime/ops/flatten_builder.h +++ b/frameworks/native/neural_network_runtime/ops/flatten_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class FlattenBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(FlattenBuilder::*FuncPtr)(std::shared_ptr); + FlattenBuilder(); ~FlattenBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -40,6 +42,9 @@ private: private: int64_t m_axis {1}; + std::unordered_map m_paramMap = { + {OH_NN_FLATTEN_AXIS, &FlattenBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/fullconnection_builder.cpp b/frameworks/native/neural_network_runtime/ops/fullconnection_builder.cpp index 3219a64..7dba24a 100644 --- a/frameworks/native/neural_network_runtime/ops/fullconnection_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/fullconnection_builder.cpp @@ -189,23 +189,13 @@ OH_NN_ReturnCode FullConnectionBuilder::Build(const std::vector& param for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; // 参数 tensor - switch (tensor->GetType()) { - case OH_NN_FULL_CONNECTION_AXIS: - returnCode = SetAxis(tensor); - break; - case OH_NN_FULL_CONNECTION_HAS_BIAS: - returnCode = SetHasBias(tensor); - break; - case OH_NN_FULL_CONNECTION_USE_AXIS: - returnCode = SetUseAxis(tensor); - break; - case OH_NN_FULL_CONNECTION_ACTIVATIONTYPE: - returnCode = SetFullConnectionActivation(tensor); - break; - default: - LOGE("[FullConnection] Build failed, param invalid, type = %{public}d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + 
LOGE("[FullConnection] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[FullConnection] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/fullconnection_builder.h b/frameworks/native/neural_network_runtime/ops/fullconnection_builder.h index bdc1648..0a21bdb 100644 --- a/frameworks/native/neural_network_runtime/ops/fullconnection_builder.h +++ b/frameworks/native/neural_network_runtime/ops/fullconnection_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class FullConnectionBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(FullConnectionBuilder::*FuncPtr)(std::shared_ptr); + FullConnectionBuilder(); ~FullConnectionBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -49,6 +51,12 @@ private: bool m_useAxis {false}; int64_t m_axis {0}; mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + std::unordered_map m_paramMap = { + {OH_NN_FULL_CONNECTION_ACTIVATIONTYPE, &FullConnectionBuilder::SetFullConnectionActivation}, + {OH_NN_FULL_CONNECTION_HAS_BIAS, &FullConnectionBuilder::SetHasBias}, + {OH_NN_FULL_CONNECTION_USE_AXIS, &FullConnectionBuilder::SetUseAxis}, + {OH_NN_FULL_CONNECTION_AXIS, &FullConnectionBuilder::SetAxis} + }; bool m_axisIsSet {false}; bool m_useAxisIsSet {false}; diff --git a/frameworks/native/neural_network_runtime/ops/gelu_builder.cpp b/frameworks/native/neural_network_runtime/ops/gelu_builder.cpp index a610dd6..65ea4bc 100644 --- a/frameworks/native/neural_network_runtime/ops/gelu_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/gelu_builder.cpp @@ -78,14 +78,13 @@ OH_NN_ReturnCode GeluBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_GELU_APPROXIMATE: - returnCode = SetApproximate(tensor); - break; - default: - LOGE("[Gelu] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Gelu] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[Gelu] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/gelu_builder.h b/frameworks/native/neural_network_runtime/ops/gelu_builder.h index 0a590dc..7e2bf6b 100644 --- a/frameworks/native/neural_network_runtime/ops/gelu_builder.h +++ b/frameworks/native/neural_network_runtime/ops/gelu_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class GeluBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(GeluBuilder::*FuncPtr)(std::shared_ptr); + GeluBuilder(); ~GeluBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,6 +38,9 @@ private: private: bool m_approximate {false}; + std::unordered_map m_paramMap = { + {OH_NN_GELU_APPROXIMATE, &GeluBuilder::SetApproximate} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/instance_norm_builder.cpp b/frameworks/native/neural_network_runtime/ops/instance_norm_builder.cpp index c6a6e43..7b7c874 100755 --- 
a/frameworks/native/neural_network_runtime/ops/instance_norm_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/instance_norm_builder.cpp @@ -78,13 +78,11 @@ OH_NN_ReturnCode InstanceNormBuilder::Build(const std::vector& paramsI for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_INSTANCE_NORM_EPSILON: - ret = SetEpsilon(tensor); - break; - default: - LOGE("[InstanceNorm] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[InstanceNorm] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/instance_norm_builder.h b/frameworks/native/neural_network_runtime/ops/instance_norm_builder.h index c811684..b645f07 100755 --- a/frameworks/native/neural_network_runtime/ops/instance_norm_builder.h +++ b/frameworks/native/neural_network_runtime/ops/instance_norm_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class InstanceNormBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(InstanceNormBuilder::*FuncPtr)(std::shared_ptr); + InstanceNormBuilder(); ~InstanceNormBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -40,6 +42,9 @@ private: private: float m_epsilon {0.0f}; + std::unordered_map m_paramMap = { + {OH_NN_INSTANCE_NORM_EPSILON, &InstanceNormBuilder::SetEpsilon} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.cpp b/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.cpp index 24ce736..9f81876 100644 --- a/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.cpp @@ -136,19 +136,11 @@ OH_NN_ReturnCode L2NormalizeBuilder::Build(const std::vector& paramsIn for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_L2_NORMALIZE_AXIS: - ret = SetAxis(tensor); - break; - case OH_NN_L2_NORMALIZE_EPSILON: - ret = SetEpsilon(tensor); - break; - case OH_NN_L2_NORMALIZE_ACTIVATION_TYPE: - ret = SetActivationType(tensor); - break; - default: - LOGE("[L2Normalize] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[L2Normalize] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.h b/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.h index 271d8d6..7cffe58 100644 --- a/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.h +++ b/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class L2NormalizeBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(L2NormalizeBuilder::*FuncPtr)(std::shared_ptr); + L2NormalizeBuilder(); ~L2NormalizeBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -42,6 
+44,11 @@ private: std::vector m_axis; float m_epsilon {1e-6}; mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + std::unordered_map m_paramMap = { + {OH_NN_L2_NORMALIZE_ACTIVATION_TYPE, &L2NormalizeBuilder::SetActivationType}, + {OH_NN_L2_NORMALIZE_EPSILON, &L2NormalizeBuilder::SetEpsilon}, + {OH_NN_L2_NORMALIZE_AXIS, &L2NormalizeBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/layernorm_builder.cpp b/frameworks/native/neural_network_runtime/ops/layernorm_builder.cpp index 63e1446..11d6ca2 100644 --- a/frameworks/native/neural_network_runtime/ops/layernorm_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/layernorm_builder.cpp @@ -129,19 +129,11 @@ OH_NN_ReturnCode LayerNormBuilder::Build(const std::vector& paramsInde for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_LAYER_NORM_BEGIN_NORM_AXIS: - returnCode = SetBeginNormAxis(tensor); - break; - case OH_NN_LAYER_NORM_EPSILON: - returnCode = SetEpsilon(tensor); - break; - case OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS: - returnCode = SetBeginParamsAxis(tensor); - break; - default: - LOGE("[LayerNormBuilder] Parameter Type is invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[LayerNormBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/layernorm_builder.h b/frameworks/native/neural_network_runtime/ops/layernorm_builder.h index 74b6bd6..4598955 100644 --- a/frameworks/native/neural_network_runtime/ops/layernorm_builder.h +++ b/frameworks/native/neural_network_runtime/ops/layernorm_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class LayerNormBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(LayerNormBuilder::*FuncPtr)(std::shared_ptr); + LayerNormBuilder(); ~LayerNormBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -43,6 +45,11 @@ private: float m_epsilon {1e-7}; bool m_elementwiseAffine {true}; int64_t m_beginParamsAxis {1}; + std::unordered_map m_paramMap = { + {OH_NN_LAYER_NORM_BEGIN_NORM_AXIS, &LayerNormBuilder::SetBeginNormAxis}, + {OH_NN_LAYER_NORM_EPSILON, &LayerNormBuilder::SetEpsilon}, + {OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS, &LayerNormBuilder::SetBeginParamsAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp index 11fc65c..189bcf3 100644 --- a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp @@ -78,13 +78,11 @@ OH_NN_ReturnCode LeakyReluBuilder::Build(const std::vector& paramsInde for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_LEAKY_RELU_NEGATIVE_SLOPE: - ret = SetNegativeSlope(tensor); - break; - default: - LOGE("[LeakyRelu] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = 
(this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[LeakyRelu] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.h b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.h index cd17bcc..656a84f 100644 --- a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.h +++ b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class LeakyReluBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(LeakyReluBuilder::*FuncPtr)(std::shared_ptr); + LeakyReluBuilder(); ~LeakyReluBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -40,6 +42,9 @@ private: private: float m_negativeSlope {0.0f}; + std::unordered_map m_paramMap = { + {OH_NN_LEAKY_RELU_NEGATIVE_SLOPE, &LeakyReluBuilder::SetNegativeSlope} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/log_softmax_builder.cpp b/frameworks/native/neural_network_runtime/ops/log_softmax_builder.cpp index 7f0139f..f9fea6c 100644 --- a/frameworks/native/neural_network_runtime/ops/log_softmax_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/log_softmax_builder.cpp @@ -31,7 +31,7 @@ LogSoftmaxBuilder::LogSoftmaxBuilder() {} LogSoftmaxBuilder::~LogSoftmaxBuilder() {} -OH_NN_ReturnCode LogSoftmaxBuilder::SetAxis(std::shared_ptr& tensor) +OH_NN_ReturnCode LogSoftmaxBuilder::SetAxis(std::shared_ptr tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[LogSoftmax] The axis should be type OH_NN_INT64."); @@ -81,13 +81,11 @@ OH_NN_ReturnCode LogSoftmaxBuilder::Build(const std::vector& paramsInd for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_LOG_SOFTMAX_AXIS: - ret = SetAxis(tensor); - break; - default: - LOGE("[LogSoftmax] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[LogSoftmax] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/log_softmax_builder.h b/frameworks/native/neural_network_runtime/ops/log_softmax_builder.h index b5e8b77..e8291e7 100644 --- a/frameworks/native/neural_network_runtime/ops/log_softmax_builder.h +++ b/frameworks/native/neural_network_runtime/ops/log_softmax_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class LogSoftmaxBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(LogSoftmaxBuilder::*FuncPtr)(std::shared_ptr); + LogSoftmaxBuilder(); ~LogSoftmaxBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -33,10 +35,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr& tensor); + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); private: int64_t m_axis {0}; + std::unordered_map m_paramMap = { + {OH_NN_LOG_SOFTMAX_AXIS, &LogSoftmaxBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/lrn_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/lrn_builder.cpp index 13fc2c8..a68e700 100644 --- a/frameworks/native/neural_network_runtime/ops/lrn_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/lrn_builder.cpp @@ -178,25 +178,11 @@ OH_NN_ReturnCode LRNBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_LRN_DEPTH_RADIUS: - ret = SetDepthRadius(tensor); - break; - case OH_NN_LRN_BIAS: - ret = SetBias(tensor); - break; - case OH_NN_LRN_ALPHA: - ret = SetAlpha(tensor); - break; - case OH_NN_LRN_BETA: - ret = SetBeta(tensor); - break; - case OH_NN_LRN_NORM_REGION: - ret = SetNormRegion(tensor); - break; - default: - LOGE("[LRN] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[LRN] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/lrn_builder.h b/frameworks/native/neural_network_runtime/ops/lrn_builder.h index c5cf81e..ee04b32 100644 --- a/frameworks/native/neural_network_runtime/ops/lrn_builder.h +++ b/frameworks/native/neural_network_runtime/ops/lrn_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class LRNBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(LRNBuilder::*FuncPtr)(std::shared_ptr); + LRNBuilder(); ~LRNBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -45,6 +47,13 @@ private: float m_alpha {0.0f}; float m_beta {0.0f}; std::string m_normRegion {"ACROSS_CHANNELS"}; + std::unordered_map m_paramMap = { + {OH_NN_LRN_ALPHA, &LRNBuilder::SetAlpha}, + {OH_NN_LRN_DEPTH_RADIUS, &LRNBuilder::SetDepthRadius}, + {OH_NN_LRN_BIAS, &LRNBuilder::SetBias}, + {OH_NN_LRN_BETA, &LRNBuilder::SetBeta}, + {OH_NN_LRN_NORM_REGION, &LRNBuilder::SetNormRegion} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/lstm_builder.cpp b/frameworks/native/neural_network_runtime/ops/lstm_builder.cpp index 5273ea8..03986ef 100644 --- a/frameworks/native/neural_network_runtime/ops/lstm_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/lstm_builder.cpp @@ -255,41 +255,13 @@ OH_NN_ReturnCode LSTMBuilder::ParseParam(const std::vector& paramsInde for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_LSTM_BIDIRECTIONAL: - returnCode = SetBidirectional(tensor); - break; - case OH_NN_LSTM_HAS_BIAS: - returnCode = SetHasBias(tensor); - break; - case OH_NN_LSTM_INPUT_SIZE: - returnCode = SetInputSize(tensor); - break; - case OH_NN_LSTM_HIDDEN_SIZE: - returnCode = SetHiddenSize(tensor); - break; - case OH_NN_LSTM_NUM_LAYERS: - returnCode = SetNumLayers(tensor); - break; - case OH_NN_LSTM_NUM_DIRECTIONS: - returnCode = SetNumDirections(tensor); - break; - case OH_NN_LSTM_DROPOUT: - returnCode = SetDropout(tensor); - break; - case OH_NN_LSTM_ZONEOUT_CELL: - returnCode = SetZoneoutCell(tensor); - break; - case OH_NN_LSTM_ZONEOUT_HIDDEN: - returnCode = SetZoneoutHidden(tensor); - break; - case OH_NN_LSTM_PROJ_SIZE: - returnCode = SetProjSize(tensor); - break; - default: - LOGE("[LSTM] Build failed, param invalid, type=%d", 
tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[LSTM] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[LSTM] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/lstm_builder.h b/frameworks/native/neural_network_runtime/ops/lstm_builder.h index 075c74b..136a67a 100644 --- a/frameworks/native/neural_network_runtime/ops/lstm_builder.h +++ b/frameworks/native/neural_network_runtime/ops/lstm_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class LSTMBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(LSTMBuilder::*FuncPtr)(std::shared_ptr); + LSTMBuilder(); ~LSTMBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -59,6 +61,18 @@ private: float m_zoneoutCell {0.0f}; float m_zoneoutHidden {0.0f}; int64_t m_projSize {0}; + std::unordered_map m_paramMap = { + {OH_NN_LSTM_BIDIRECTIONAL, &LSTMBuilder::SetBidirectional}, + {OH_NN_LSTM_HAS_BIAS, &LSTMBuilder::SetHasBias}, + {OH_NN_LSTM_INPUT_SIZE, &LSTMBuilder::SetInputSize}, + {OH_NN_LSTM_HIDDEN_SIZE, &LSTMBuilder::SetHiddenSize}, + {OH_NN_LSTM_NUM_LAYERS, &LSTMBuilder::SetNumLayers}, + {OH_NN_LSTM_NUM_DIRECTIONS, &LSTMBuilder::SetNumDirections}, + {OH_NN_LSTM_DROPOUT, &LSTMBuilder::SetDropout}, + {OH_NN_LSTM_ZONEOUT_CELL, &LSTMBuilder::SetZoneoutCell}, + {OH_NN_LSTM_ZONEOUT_HIDDEN, &LSTMBuilder::SetZoneoutHidden}, + {OH_NN_LSTM_PROJ_SIZE, &LSTMBuilder::SetProjSize} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/matmul_builder.cpp b/frameworks/native/neural_network_runtime/ops/matmul_builder.cpp index 5913934..fb042b5 100644 --- a/frameworks/native/neural_network_runtime/ops/matmul_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/matmul_builder.cpp @@ -135,19 +135,11 @@ OH_NN_ReturnCode MatmulBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_MATMUL_TRANSPOSE_A: - returnCode = SetTransposeA(tensor); - break; - case OH_NN_MATMUL_TRANSPOSE_B: - returnCode = SetTransposeB(tensor); - break; - case OH_NN_MATMUL_ACTIVATION_TYPE: - returnCode = SetActivationType(tensor); - break; - default: - LOGE("[Matmul] Parameter Type is invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Matmul] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/matmul_builder.h b/frameworks/native/neural_network_runtime/ops/matmul_builder.h index 3d39f20..039d9c2 100644 --- a/frameworks/native/neural_network_runtime/ops/matmul_builder.h +++ b/frameworks/native/neural_network_runtime/ops/matmul_builder.h @@ -25,6 +25,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class MatmulBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(MatmulBuilder::*FuncPtr)(std::shared_ptr); + MatmulBuilder(); ~MatmulBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -42,6 +44,11 @@ private: 
mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; bool m_transposeA{false}; bool m_transposeB{false}; + std::unordered_map m_paramMap = { + {OH_NN_MATMUL_TRANSPOSE_A, &MatmulBuilder::SetTransposeA}, + {OH_NN_MATMUL_TRANSPOSE_B, &MatmulBuilder::SetTransposeB}, + {OH_NN_MATMUL_ACTIVATION_TYPE, &MatmulBuilder::SetActivationType} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/mul_builder.h b/frameworks/native/neural_network_runtime/ops/mul_builder.h index 62c0b24..b42fedb 100644 --- a/frameworks/native/neural_network_runtime/ops/mul_builder.h +++ b/frameworks/native/neural_network_runtime/ops/mul_builder.h @@ -25,6 +25,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class MulBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(MulBuilder::*FuncPtr)(std::shared_ptr); + MulBuilder(); ~MulBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -38,6 +40,9 @@ private: private: mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + std::unordered_map m_paramMap = { + {OH_NN_MUL_ACTIVATION_TYPE, &MulBuilder::SetActivationType} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/onehot_builder.cpp b/frameworks/native/neural_network_runtime/ops/onehot_builder.cpp index 88474d5..5a375fe 100644 --- a/frameworks/native/neural_network_runtime/ops/onehot_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/onehot_builder.cpp @@ -76,13 +76,11 @@ OH_NN_ReturnCode OnehotBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_ONE_HOT_AXIS: - returnCode = SetAxis(tensor); - break; - default: - LOGE("[Onehot] Parameter Type is invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Onehot] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/onehot_builder.h b/frameworks/native/neural_network_runtime/ops/onehot_builder.h index cca7d01..ec48f3b 100644 --- a/frameworks/native/neural_network_runtime/ops/onehot_builder.h +++ b/frameworks/native/neural_network_runtime/ops/onehot_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class OnehotBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(OnehotBuilder::*FuncPtr)(std::shared_ptr); + OnehotBuilder(); ~OnehotBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,6 +38,9 @@ private: private: int64_t m_axis {-1}; + std::unordered_map m_paramMap = { + {OH_NN_ONE_HOT_AXIS, &OnehotBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/pad_builder.cpp b/frameworks/native/neural_network_runtime/ops/pad_builder.cpp index f62e071..56a8a63 100644 --- a/frameworks/native/neural_network_runtime/ops/pad_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/pad_builder.cpp @@ -120,16 +120,11 @@ OH_NN_ReturnCode PadBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch 
(tensor->GetType()) { - case OH_NN_PAD_CONSTANT_VALUE: - returnCode = SetConstantValue(tensor); - break; - case OH_NN_PAD_PADDING_MODE: - returnCode = SetPaddingMode(tensor); - break; - default: - LOGE("[Pad] Parameter Type is invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Pad] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/pad_builder.h b/frameworks/native/neural_network_runtime/ops/pad_builder.h index ca523af..0ba2c23 100644 --- a/frameworks/native/neural_network_runtime/ops/pad_builder.h +++ b/frameworks/native/neural_network_runtime/ops/pad_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class PadBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(PadBuilder::*FuncPtr)(std::shared_ptr); + PadBuilder(); ~PadBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -39,6 +41,10 @@ private: private: float m_constantValue {0.0f}; mindspore::lite::PaddingMode m_paddingMode {mindspore::lite::PADDING_MODE_CONSTANT}; + std::unordered_map m_paramMap = { + {OH_NN_PAD_CONSTANT_VALUE, &PadBuilder::SetConstantValue}, + {OH_NN_PAD_PADDING_MODE, &PadBuilder::SetPaddingMode} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/pooling_builder.cpp b/frameworks/native/neural_network_runtime/ops/pooling_builder.cpp index 42131d3..2213941 100644 --- a/frameworks/native/neural_network_runtime/ops/pooling_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/pooling_builder.cpp @@ -57,37 +57,13 @@ OH_NN_ReturnCode PoolingBuilder::PoolingBuild(const std::vector& param for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_AVG_POOL_KERNEL_SIZE: - case OH_NN_MAX_POOL_KERNEL_SIZE: - returnCode = SetKernel(tensor); - break; - case OH_NN_AVG_POOL_STRIDE: - case OH_NN_MAX_POOL_STRIDE: - returnCode = SetStrides(tensor); - break; - case OH_NN_AVG_POOL_PAD_MODE: - case OH_NN_MAX_POOL_PAD_MODE: - case OH_NN_MAX_POOL_PAD: - case OH_NN_AVG_POOL_PAD: - returnCode = SetPadModeOrPaddings(tensor); - break; - case OH_NN_AVG_POOL_ROUND_MODE: - case OH_NN_MAX_POOL_ROUND_MODE: - returnCode = SetRoundMode(tensor); - break; - case OH_NN_AVG_POOL_ACTIVATION_TYPE: - case OH_NN_MAX_POOL_ACTIVATION_TYPE: - returnCode = SetActivation(tensor); - break; - case OH_NN_AVG_POOL_GLOBAL: - case OH_NN_MAX_POOL_GLOBAL: - returnCode = SetGlobal(tensor); - break; - default: - LOGE("[PoolingBuilder] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[PoolingBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[PoolingBuilder] PoolingBuild failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/pooling_builder.h b/frameworks/native/neural_network_runtime/ops/pooling_builder.h index 3f020d8..f4199eb 100644 --- a/frameworks/native/neural_network_runtime/ops/pooling_builder.h +++ 
b/frameworks/native/neural_network_runtime/ops/pooling_builder.h @@ -25,6 +25,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class PoolingBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(PoolingBuilder::*FuncPtr)(std::shared_ptr); + PoolingBuilder() = default; virtual ~PoolingBuilder() = default; @@ -53,6 +55,24 @@ protected: mindspore::lite::RoundMode m_roundMode {mindspore::lite::ROUND_MODE_FLOOR}; mindspore::lite::Format m_format {mindspore::lite::FORMAT_NCHW}; bool m_global {false}; + std::unordered_map m_paramMap = { + {OH_NN_MAX_POOL_KERNEL_SIZE, &PoolingBuilder::SetKernel}, + {OH_NN_MAX_POOL_STRIDE, &PoolingBuilder::SetStrides}, + {OH_NN_MAX_POOL_PAD_MODE, &PoolingBuilder::SetPadModeOrPaddings}, + {OH_NN_MAX_POOL_PAD, &PoolingBuilder::SetPadModeOrPaddings}, + {OH_NN_MAX_POOL_ACTIVATION_TYPE, &PoolingBuilder::SetActivation}, + {OH_NN_MAX_POOL_ROUND_MODE, &PoolingBuilder::SetRoundMode}, + {OH_NN_MAX_POOL_GLOBAL, &PoolingBuilder::SetGlobal}, + + + {OH_NN_AVG_POOL_KERNEL_SIZE, &PoolingBuilder::SetKernel}, + {OH_NN_AVG_POOL_STRIDE, &PoolingBuilder::SetStrides}, + {OH_NN_AVG_POOL_PAD_MODE, &PoolingBuilder::SetPadModeOrPaddings}, + {OH_NN_AVG_POOL_PAD, &PoolingBuilder::SetPadModeOrPaddings}, + {OH_NN_AVG_POOL_ACTIVATION_TYPE, &PoolingBuilder::SetActivation}, + {OH_NN_AVG_POOL_ROUND_MODE, &PoolingBuilder::SetRoundMode}, + {OH_NN_AVG_POOL_GLOBAL, &PoolingBuilder::SetGlobal} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/pow_builder.cpp b/frameworks/native/neural_network_runtime/ops/pow_builder.cpp index 008ddec..83864c6 100644 --- a/frameworks/native/neural_network_runtime/ops/pow_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/pow_builder.cpp @@ -27,10 +27,7 @@ static const int PARAM_MAX_NUM = 2; static const int SCALAR_LENGTH = 1; static const std::string OP_NAME = "Pow"; -PowBuilder::PowBuilder() { - //ParamHashMap[OH_NN_POW_SCALE] = &PowBuilder::SetScale; - //ParamHashMap[OH_NN_POW_SHIFT] = &PowBuilder::SetShift; -} +PowBuilder::PowBuilder() {} PowBuilder::~PowBuilder() {} @@ -107,8 +104,8 @@ OH_NN_ReturnCode PowBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - if (ParamHashMap.find(tensor->GetType()) != ParamHashMap.end()) { - returnCode = (this->*(ParamHashMap[tensor->GetType()]))(tensor); + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); } else { LOGE("[Pow] Build failed, param invalid, type=%d", tensor->GetType()); return OH_NN_INVALID_PARAMETER; diff --git a/frameworks/native/neural_network_runtime/ops/pow_builder.h b/frameworks/native/neural_network_runtime/ops/pow_builder.h index f48612a..9c34ed4 100644 --- a/frameworks/native/neural_network_runtime/ops/pow_builder.h +++ b/frameworks/native/neural_network_runtime/ops/pow_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class PowBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(PowBuilder::*FuncPtr)(std::shared_ptr); + PowBuilder(); ~PowBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -39,7 +41,7 @@ private: private: float m_scale {1.0f}; float m_shift {0.0f}; - std::unordered_map)> ParamHashMap = { + std::unordered_map m_paramMap = { {OH_NN_POW_SCALE, &PowBuilder::SetScale}, {OH_NN_POW_SHIFT, &PowBuilder::SetShift} }; diff --git 
a/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.cpp b/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.cpp index 7b693c6..ff9044c 100644 --- a/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.cpp @@ -111,19 +111,11 @@ OH_NN_ReturnCode QuantDTypeCastBuilder::Build(const std::vector& param for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_QUANT_DTYPE_CAST_SRC_T: - returnCode = SetSrcT(tensor); - break; - case OH_NN_QUANT_DTYPE_CAST_DST_T: - returnCode = SetDstT(tensor); - break; - case OH_NN_QUANT_DTYPE_CAST_AXIS: - returnCode = SetAxis(tensor); - break; - default: - LOGE("[QuantDTypeCast] Build failed, parameter type is invalid. type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[QuantDTypeCast] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.h b/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.h index f85922b..109df64 100644 --- a/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.h +++ b/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class QuantDTypeCastBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(QuantDTypeCastBuilder::*FuncPtr)(std::shared_ptr); + QuantDTypeCastBuilder(); ~QuantDTypeCastBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -41,6 +43,11 @@ private: const uint64_t* m_src_t{nullptr}; const uint64_t* m_dst_t{nullptr}; int64_t m_axis {0}; + std::unordered_map m_paramMap = { + {OH_NN_QUANT_DTYPE_CAST_SRC_T, &QuantDTypeCastBuilder::SetSrcT}, + {OH_NN_QUANT_DTYPE_CAST_DST_T, &QuantDTypeCastBuilder::SetDstT}, + {OH_NN_QUANT_DTYPE_CAST_AXIS, &QuantDTypeCastBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/range_builder.cpp b/frameworks/native/neural_network_runtime/ops/range_builder.cpp index c2a6e75..2c2a0e9 100755 --- a/frameworks/native/neural_network_runtime/ops/range_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/range_builder.cpp @@ -122,19 +122,11 @@ OH_NN_ReturnCode RangeBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_RANGE_START: - ret = SetStart(tensor); - break; - case OH_NN_RANGE_LIMIT: - ret = SetLimit(tensor); - break; - case OH_NN_RANGE_DELTA: - ret = SetDelta(tensor); - break; - default: - LOGE("[Range] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Range] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/range_builder.h b/frameworks/native/neural_network_runtime/ops/range_builder.h index 8bc33f1..1d4ec40 
100755 --- a/frameworks/native/neural_network_runtime/ops/range_builder.h +++ b/frameworks/native/neural_network_runtime/ops/range_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class RangeBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(RangeBuilder::*FuncPtr)(std::shared_ptr); + RangeBuilder(); ~RangeBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -44,6 +46,11 @@ private: int64_t m_start {0}; int64_t m_limit {0}; int64_t m_delta {1}; + std::unordered_map m_paramMap = { + {OH_NN_RANGE_START, &RangeBuilder::SetStart}, + {OH_NN_RANGE_LIMIT, &RangeBuilder::SetLimit}, + {OH_NN_RANGE_DELTA, &RangeBuilder::SetDelta} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reduceL2_builder.cpp b/frameworks/native/neural_network_runtime/ops/reduceL2_builder.cpp index e2d7ffb..58fde17 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceL2_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reduceL2_builder.cpp @@ -126,19 +126,11 @@ OH_NN_ReturnCode ReduceL2Builder::Build(const std::vector& paramsIndex for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_REDUCE_L2_KEEP_DIMS: - returnCode = SetKeepDims(tensor); - break; - case OH_NN_REDUCE_L2_REDUCE_TO_END: - returnCode = SetReduceToEnd(tensor); - break; - case OH_NN_REDUCE_L2_COEFF: - returnCode = SetCoeff(tensor); - break; - default: - LOGE("[ReduceL2] Build failed, parameter type is invalid. type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ReduceL2] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/reduceL2_builder.h b/frameworks/native/neural_network_runtime/ops/reduceL2_builder.h index 9c61bd2..34f786e 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceL2_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reduceL2_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceL2Builder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(ReduceL2Builder::*FuncPtr)(std::shared_ptr); + ReduceL2Builder(); ~ReduceL2Builder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -41,6 +43,11 @@ private: float m_coeff {0.0f}; bool m_reduceToEnd {false}; bool m_keepDims {false}; + std::unordered_map m_paramMap = { + {OH_NN_REDUCE_L2_COEFF, &ReduceL2Builder::SetCoeff}, + {OH_NN_REDUCE_L2_REDUCE_TO_END, &ReduceL2Builder::SetReduceToEnd}, + {OH_NN_REDUCE_L2_KEEP_DIMS, &ReduceL2Builder::SetKeepDims} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reduceall_builder.cpp b/frameworks/native/neural_network_runtime/ops/reduceall_builder.cpp index 50eb5de..6bca687 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceall_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reduceall_builder.cpp @@ -126,19 +126,11 @@ OH_NN_ReturnCode ReduceAllBuilder::Build(const std::vector& paramsInde for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_REDUCE_ALL_KEEP_DIMS: - returnCode = SetKeepDims(tensor); - break; - case 
OH_NN_REDUCE_ALL_REDUCE_TO_END: - returnCode = SetReduceToEnd(tensor); - break; - case OH_NN_REDUCE_ALL_COEFF: - returnCode = SetCoeff(tensor); - break; - default: - LOGE("[ReduceAll] Build failed, parameter type is invalid. type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ReduceAll] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/reduceall_builder.h b/frameworks/native/neural_network_runtime/ops/reduceall_builder.h index 8efd7c0..a18f22a 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceall_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reduceall_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceAllBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(ReduceAllBuilder::*FuncPtr)(std::shared_ptr); + ReduceAllBuilder(); ~ReduceAllBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -41,6 +43,11 @@ private: float m_coeff {0.0f}; bool m_reduceToEnd {false}; bool m_keepDims {false}; + std::unordered_map m_paramMap = { + {OH_NN_REDUCE_ALL_COEFF, &ReduceAllBuilder::SetCoeff}, + {OH_NN_REDUCE_ALL_REDUCE_TO_END, &ReduceAllBuilder::SetReduceToEnd}, + {OH_NN_REDUCE_ALL_KEEP_DIMS, &ReduceAllBuilder::SetKeepDims} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reducemax_builder.cpp b/frameworks/native/neural_network_runtime/ops/reducemax_builder.cpp index 7b255c9..9ec85d5 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemax_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reducemax_builder.cpp @@ -126,19 +126,11 @@ OH_NN_ReturnCode ReduceMaxBuilder::Build(const std::vector& paramsInde for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_REDUCE_MAX_KEEP_DIMS: - returnCode = SetKeepDims(tensor); - break; - case OH_NN_REDUCE_MAX_REDUCE_TO_END: - returnCode = SetReduceToEnd(tensor); - break; - case OH_NN_REDUCE_MAX_COEFF: - returnCode = SetCoeff(tensor); - break; - default: - LOGE("[ReduceMax] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ReduceMax] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/reducemax_builder.h b/frameworks/native/neural_network_runtime/ops/reducemax_builder.h index 08f469b..5f39651 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemax_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reducemax_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceMaxBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(ReduceMaxBuilder::*FuncPtr)(std::shared_ptr); + ReduceMaxBuilder(); ~ReduceMaxBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -41,6 +43,11 @@ private: float m_coeff {0.0f}; bool m_reduceToEnd {false}; bool m_keepDims {false}; + std::unordered_map m_paramMap = { + {OH_NN_REDUCE_MAX_COEFF, &ReduceMaxBuilder::SetCoeff}, + {OH_NN_REDUCE_MAX_REDUCE_TO_END, &ReduceMaxBuilder::SetReduceToEnd}, + {OH_NN_REDUCE_MAX_KEEP_DIMS, &ReduceMaxBuilder::SetKeepDims} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reducemean_builder.cpp b/frameworks/native/neural_network_runtime/ops/reducemean_builder.cpp index 8f821ce..6f6739f 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemean_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reducemean_builder.cpp @@ -126,19 +126,11 @@ OH_NN_ReturnCode ReduceMeanBuilder::Build(const std::vector& paramsInd for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_REDUCE_MEAN_KEEP_DIMS: - returnCode = SetKeepDims(tensor); - break; - case OH_NN_REDUCE_MEAN_REDUCE_TO_END: - returnCode = SetReduceToEnd(tensor); - break; - case OH_NN_REDUCE_MEAN_COEFF: - returnCode = SetCoeff(tensor); - break; - default: - LOGE("[ReduceMean] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ReduceMean] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/reducemean_builder.h b/frameworks/native/neural_network_runtime/ops/reducemean_builder.h index 64af503..c1b5026 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemean_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reducemean_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceMeanBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(ReduceMeanBuilder::*FuncPtr)(std::shared_ptr); + ReduceMeanBuilder(); ~ReduceMeanBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -41,6 +43,11 @@ private: bool m_keepDims{false}; float m_coeff {0.0f}; bool m_reduceToEnd {false}; + std::unordered_map m_paramMap = { + {OH_NN_REDUCE_MEAN_COEFF, &ReduceMeanBuilder::SetCoeff}, + {OH_NN_REDUCE_MEAN_REDUCE_TO_END, &ReduceMeanBuilder::SetReduceToEnd}, + {OH_NN_REDUCE_MEAN_KEEP_DIMS, &ReduceMeanBuilder::SetKeepDims} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reducemin_builder.cpp b/frameworks/native/neural_network_runtime/ops/reducemin_builder.cpp index 9cf7fed..907a962 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemin_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reducemin_builder.cpp @@ -126,19 +126,11 @@ OH_NN_ReturnCode ReduceMinBuilder::Build(const std::vector& paramsInde for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_REDUCE_MIN_KEEP_DIMS: - returnCode = SetKeepDims(tensor); - break; - case OH_NN_REDUCE_MIN_REDUCE_TO_END: - returnCode = SetReduceToEnd(tensor); - break; - case OH_NN_REDUCE_MIN_COEFF: - returnCode = SetCoeff(tensor); - break; - default: - LOGE("[ReduceMin] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ReduceMin] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/reducemin_builder.h b/frameworks/native/neural_network_runtime/ops/reducemin_builder.h index 01e1e9b..2f0bba3 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemin_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reducemin_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceMinBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(ReduceMinBuilder::*FuncPtr)(std::shared_ptr); + ReduceMinBuilder(); ~ReduceMinBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -41,6 +43,11 @@ private: float m_coeff {0.0f}; bool m_reduceToEnd {false}; bool m_keepDims {false}; + std::unordered_map m_paramMap = { + {OH_NN_REDUCE_MIN_COEFF, &ReduceMinBuilder::SetCoeff}, + {OH_NN_REDUCE_MIN_REDUCE_TO_END, &ReduceMinBuilder::SetReduceToEnd}, + {OH_NN_REDUCE_MIN_KEEP_DIMS, &ReduceMinBuilder::SetKeepDims} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reduceprod_builder.cpp b/frameworks/native/neural_network_runtime/ops/reduceprod_builder.cpp index cfcee8c..654b9a6 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceprod_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reduceprod_builder.cpp @@ -126,19 +126,11 @@ OH_NN_ReturnCode ReduceProdBuilder::Build(const std::vector& paramsInd for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_REDUCE_PROD_KEEP_DIMS: - returnCode = SetKeepDims(tensor); - break; - case OH_NN_REDUCE_PROD_REDUCE_TO_END: - returnCode = SetReduceToEnd(tensor); - break; - case OH_NN_REDUCE_PROD_COEFF: - returnCode = SetCoeff(tensor); - break; - default: - LOGE("[ReduceProd] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ReduceProd] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/reduceprod_builder.h b/frameworks/native/neural_network_runtime/ops/reduceprod_builder.h index 7ca13c1..b961ad9 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceprod_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reduceprod_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceProdBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(ReduceProdBuilder::*FuncPtr)(std::shared_ptr); + ReduceProdBuilder(); ~ReduceProdBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -41,6 +43,11 @@ private: bool m_keepDims{false}; float m_coeff {0.0f}; bool m_reduceToEnd {false}; + std::unordered_map m_paramMap = { + {OH_NN_REDUCE_PROD_COEFF, &ReduceProdBuilder::SetCoeff}, + {OH_NN_REDUCE_PROD_REDUCE_TO_END, &ReduceProdBuilder::SetReduceToEnd}, + {OH_NN_REDUCE_PROD_KEEP_DIMS, &ReduceProdBuilder::SetKeepDims} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reducesum_builder.cpp b/frameworks/native/neural_network_runtime/ops/reducesum_builder.cpp index 18e1805..1a9b465 100644 --- a/frameworks/native/neural_network_runtime/ops/reducesum_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reducesum_builder.cpp @@ -126,19 +126,11 @@ OH_NN_ReturnCode ReduceSumBuilder::Build(const std::vector& paramsInde for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_REDUCE_SUM_KEEP_DIMS: - returnCode = SetKeepDims(tensor); - break; - case OH_NN_REDUCE_SUM_REDUCE_TO_END: - returnCode = SetReduceToEnd(tensor); - break; - case OH_NN_REDUCE_SUM_COEFF: - returnCode = SetCoeff(tensor); - break; - default: - LOGE("[ReduceSum] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ReduceSum] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/reducesum_builder.h b/frameworks/native/neural_network_runtime/ops/reducesum_builder.h index 778f9cb..794628d 100644 --- a/frameworks/native/neural_network_runtime/ops/reducesum_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reducesum_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceSumBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(ReduceSumBuilder::*FuncPtr)(std::shared_ptr); + ReduceSumBuilder(); ~ReduceSumBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -41,6 +43,11 @@ private: float m_coeff {0.0f}; bool m_reduceToEnd {false}; bool m_keepDims {false}; + std::unordered_map m_paramMap = { + {OH_NN_REDUCE_SUM_COEFF, &ReduceSumBuilder::SetCoeff}, + {OH_NN_REDUCE_SUM_REDUCE_TO_END, &ReduceSumBuilder::SetReduceToEnd}, + {OH_NN_REDUCE_SUM_KEEP_DIMS, &ReduceSumBuilder::SetKeepDims} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.cpp b/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.cpp index 137a62e..fed0825 100644 --- a/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.cpp @@ -175,25 +175,11 @@ OH_NN_ReturnCode ResizeBilinearBuilder::Build(const std::vector& param for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_RESIZE_BILINEAR_NEW_HEIGHT: - returnCode = SetNewHeight(tensor); - break; - case OH_NN_RESIZE_BILINEAR_NEW_WIDTH: - returnCode = SetNewWidth(tensor); - break; - case OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO: - returnCode = SetPreserveAspectRatio(tensor); - break; - case OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE: - returnCode = SetCoordinateTransformMode(tensor); - break; - case OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE: - returnCode = SetExcludeOutside(tensor); - break; - default: - LOGE("[ResizeBilinear] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ResizeBilinear] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.h b/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.h index 6503e77..25fac57 100644 --- a/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.h +++ b/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.h @@ -25,6 +25,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ResizeBilinearBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(ResizeBilinearBuilder::*FuncPtr)(std::shared_ptr); + ResizeBilinearBuilder(); ~ResizeBilinearBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -49,6 +51,13 @@ private: mindspore::lite::CoordinateTransformMode m_coordinateTransformMode { mindspore::lite::COORDINATE_TRANSFORM_MODE_ASYMMETRIC}; uint64_t m_excludeOutside{0}; + std::unordered_map m_paramMap = { + {OH_NN_RESIZE_BILINEAR_NEW_HEIGHT, &ResizeBilinearBuilder::SetNewHeight}, + {OH_NN_RESIZE_BILINEAR_NEW_WIDTH, &ResizeBilinearBuilder::SetNewWidth}, + {OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO, &ResizeBilinearBuilder::SetPreserveAspectRatio}, + {OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE, &ResizeBilinearBuilder::SetCoordinateTransformMode}, + {OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE, &ResizeBilinearBuilder::SetExcludeOutside} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/scale_builder.cpp b/frameworks/native/neural_network_runtime/ops/scale_builder.cpp index 9c2256a..cecd8b4 100644 --- a/frameworks/native/neural_network_runtime/ops/scale_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/scale_builder.cpp @@ -112,16 +112,11 @@ OH_NN_ReturnCode ScaleBuilder::Build(const std::vector& paramsIndex, for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_SCALE_AXIS: - returnCode = SetAxis(tensor); - break; - case OH_NN_SCALE_ACTIVATIONTYPE: - returnCode = SetActivationType(tensor); - break; - default: - LOGE("[ResizeBilinear] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ScaleBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/scale_builder.h b/frameworks/native/neural_network_runtime/ops/scale_builder.h index d9b011e..da8822f 100644 --- a/frameworks/native/neural_network_runtime/ops/scale_builder.h +++ b/frameworks/native/neural_network_runtime/ops/scale_builder.h @@ -25,6 +25,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ScaleBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(ScaleBuilder::*FuncPtr)(std::shared_ptr); + ScaleBuilder(); ~ScaleBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -41,6 +43,10 @@ private: private: mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; const uint64_t* m_axis{nullptr}; + std::unordered_map m_paramMap = { + {OH_NN_SCALE_ACTIVATIONTYPE, &ScaleBuilder::SetActivationType}, + {OH_NN_SCALE_AXIS, &ScaleBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/slice_builder.cpp b/frameworks/native/neural_network_runtime/ops/slice_builder.cpp index 4ab1d94..3ee017f 100644 --- a/frameworks/native/neural_network_runtime/ops/slice_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/slice_builder.cpp @@ -85,14 +85,13 @@ OH_NN_ReturnCode SliceBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_SLICE_AXES: - returnCode = SetAxes(tensor); - break; - default: - LOGE("[SliceBuilder] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[SliceBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[SliceBuilder] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/slice_builder.h b/frameworks/native/neural_network_runtime/ops/slice_builder.h index f6f5ee6..b791b7a 100644 --- a/frameworks/native/neural_network_runtime/ops/slice_builder.h +++ b/frameworks/native/neural_network_runtime/ops/slice_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class SliceBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(SliceBuilder::*FuncPtr)(std::shared_ptr); + SliceBuilder(); ~SliceBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -38,6 +40,9 @@ private: private: std::vector m_axes; + std::unordered_map m_paramMap = { + {OH_NN_SLICE_AXES, &SliceBuilder::SetAxes} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/softmax_builder.cpp b/frameworks/native/neural_network_runtime/ops/softmax_builder.cpp index 6b64c66..2e825bb 100644 --- a/frameworks/native/neural_network_runtime/ops/softmax_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/softmax_builder.cpp @@ -82,13 +82,11 @@ 
OH_NN_ReturnCode SoftmaxBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_SOFTMAX_AXIS: - returnCode = SetAxis(tensor); - break; - default: - LOGE("[SoftmaxBuilder] Parameter Type is invalid. type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[SoftmaxBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/softmax_builder.h b/frameworks/native/neural_network_runtime/ops/softmax_builder.h index 2b83133..405c69e 100644 --- a/frameworks/native/neural_network_runtime/ops/softmax_builder.h +++ b/frameworks/native/neural_network_runtime/ops/softmax_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class SoftmaxBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(SoftmaxBuilder::*FuncPtr)(std::shared_ptr); + SoftmaxBuilder(); ~SoftmaxBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -38,6 +40,9 @@ private: private: std::vector m_axis; + std::unordered_map m_paramMap = { + {OH_NN_SOFTMAX_AXIS, &SoftmaxBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.cpp b/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.cpp index 6fd6635..0c20c61 100644 --- a/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.cpp @@ -128,16 +128,11 @@ OH_NN_ReturnCode SpaceToBatchNDBuilder::Build(const std::vector& param for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE: - returnCode = SetBlockShape(tensor); - break; - case OH_NN_SPACE_TO_BATCH_ND_PADDINGS: - returnCode = SetPaddings(tensor); - break; - default: - LOGE("[SpaceToBatchNDBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[SpaceToBatchNDBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.h b/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.h index 4f0d067..bfe292d 100644 --- a/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.h +++ b/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class SpaceToBatchNDBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(SpaceToBatchNDBuilder::*FuncPtr)(std::shared_ptr); + SpaceToBatchNDBuilder(); ~SpaceToBatchNDBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -41,6 +43,10 @@ private: private: std::vector> paddings; std::vector block_shape {}; + std::unordered_map m_paramMap = { + {OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE, &SpaceToBatchNDBuilder::SetBlockShape}, + {OH_NN_SPACE_TO_BATCH_ND_PADDINGS, &SpaceToBatchNDBuilder::SetPaddings} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.cpp b/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.cpp index 3829df7..50b2a3c 100644 --- a/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.cpp @@ -83,13 +83,11 @@ OH_NN_ReturnCode SpaceToDepthBuilder::Build(const std::vector& paramsI for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE: - ret = SetBlockSize(tensor); - break; - default: - LOGE("[SpaceToDepth] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[SpaceToDepth] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.h b/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.h index 425347f..fafa8aa 100644 --- a/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.h +++ b/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class SpaceToDepthBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(SpaceToDepthBuilder::*FuncPtr)(std::shared_ptr); + SpaceToDepthBuilder(); ~SpaceToDepthBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -37,6 +39,9 @@ private: private: int64_t m_blockSize {0}; + std::unordered_map m_paramMap = { + {OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE, &SpaceToDepthBuilder::SetBlockSize} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/split_builder.cpp b/frameworks/native/neural_network_runtime/ops/split_builder.cpp index ad6091c..81f80fb 100644 --- a/frameworks/native/neural_network_runtime/ops/split_builder.cpp +++ 
b/frameworks/native/neural_network_runtime/ops/split_builder.cpp @@ -146,19 +146,11 @@ OH_NN_ReturnCode SplitBuilder::Build(const std::vector ¶msIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_SPLIT_AXIS: - returnCode = SetAxis(tensor); - break; - case OH_NN_SPLIT_OUTPUT_NUM: - returnCode = SetOutputNum(tensor); - break; - case OH_NN_SPLIT_SIZE_SPLITS: - returnCode = SetSizeSplits(tensor); - break; - default: - LOGE("[SplitBuilder] Parameter Type is invalid. type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[SplitBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/split_builder.h b/frameworks/native/neural_network_runtime/ops/split_builder.h index 3ebee8d..4c7cde6 100644 --- a/frameworks/native/neural_network_runtime/ops/split_builder.h +++ b/frameworks/native/neural_network_runtime/ops/split_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class SplitBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(SplitBuilder::*FuncPtr)(std::shared_ptr); + SplitBuilder(); ~SplitBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -47,6 +49,11 @@ private: int64_t m_output_num {0}; std::vector m_size_splits; int64_t m_axis {0}; + std::unordered_map m_paramMap = { + {OH_NN_SPLIT_AXIS, &SplitBuilder::SetAxis}, + {OH_NN_SPLIT_OUTPUT_NUM, &SplitBuilder::SetOutputNum}, + {OH_NN_SPLIT_SIZE_SPLITS, &SplitBuilder::SetSizeSplits} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/squeeze_builder.cpp b/frameworks/native/neural_network_runtime/ops/squeeze_builder.cpp index 6160466..3e99c5c 100644 --- a/frameworks/native/neural_network_runtime/ops/squeeze_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/squeeze_builder.cpp @@ -86,13 +86,11 @@ OH_NN_ReturnCode SqueezeBuilder::Build(const std::vector ¶msIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_SQUEEZE_AXIS: - returnCode = SetAxis(tensor); - break; - default: - LOGE("[SqueezeBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[SqueezeBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/squeeze_builder.h b/frameworks/native/neural_network_runtime/ops/squeeze_builder.h index f02ed38..2c8b999 100644 --- a/frameworks/native/neural_network_runtime/ops/squeeze_builder.h +++ b/frameworks/native/neural_network_runtime/ops/squeeze_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class SqueezeBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(SqueezeBuilder::*FuncPtr)(std::shared_ptr); + SqueezeBuilder(); ~SqueezeBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -38,6 +40,9 @@ private: private: std::vector m_axis; + std::unordered_map m_paramMap = { + {OH_NN_SQUEEZE_AXIS, &SqueezeBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/stack_builder.cpp b/frameworks/native/neural_network_runtime/ops/stack_builder.cpp index f3aa2b3..2d86a53 100644 --- a/frameworks/native/neural_network_runtime/ops/stack_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/stack_builder.cpp @@ -88,13 +88,11 @@ OH_NN_ReturnCode StackBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_STACK_AXIS: - returnCode = SetAxis(tensor); - break; - default: - LOGE("[StackBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[StackBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/stack_builder.h b/frameworks/native/neural_network_runtime/ops/stack_builder.h index 665fb16..ba4ae02 100644 --- a/frameworks/native/neural_network_runtime/ops/stack_builder.h +++ b/frameworks/native/neural_network_runtime/ops/stack_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class StackBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(StackBuilder::*FuncPtr)(std::shared_ptr); + StackBuilder(); ~StackBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -38,6 +40,9 @@ private: private: int64_t m_axis = {0}; + std::unordered_map m_paramMap = { + {OH_NN_STACK_AXIS, &StackBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/strided_slice_builder.cpp b/frameworks/native/neural_network_runtime/ops/strided_slice_builder.cpp index bc53754..f5e7682 100644 --- a/frameworks/native/neural_network_runtime/ops/strided_slice_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/strided_slice_builder.cpp @@ -163,25 +163,11 @@ OH_NN_ReturnCode StridedSliceBuilder::Build(const std::vector& paramsI for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_STRIDED_SLICE_BEGIN_MASK: - returnCode = SetBeginMask(tensor); - break; - case OH_NN_STRIDED_SLICE_END_MASK: - returnCode = SetEndMask(tensor); - break; - case OH_NN_STRIDED_SLICE_ELLIPSIS_MASK: - returnCode = SetEllipsisMask(tensor); - break; - case OH_NN_STRIDED_SLICE_NEW_AXIS_MASK: - returnCode = SetNewAxisMask(tensor); - break; - case OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK: - returnCode = SetShrinkAxisMask(tensor); - break; - default: - LOGE("[StridedSliceBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[StridedSliceBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/strided_slice_builder.h b/frameworks/native/neural_network_runtime/ops/strided_slice_builder.h index 06ccb65..23690cb 100644 --- a/frameworks/native/neural_network_runtime/ops/strided_slice_builder.h +++ b/frameworks/native/neural_network_runtime/ops/strided_slice_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class StridedSliceBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(StridedSliceBuilder::*FuncPtr)(std::shared_ptr); + StridedSliceBuilder(); ~StridedSliceBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -49,6 +51,13 @@ private: int64_t m_ellipsis_mask = {0}; int64_t m_new_axis_mask = {0}; int64_t m_shrink_axis_mask = {0}; + std::unordered_map m_paramMap = { + {OH_NN_STRIDED_SLICE_BEGIN_MASK, &StridedSliceBuilder::SetBeginMask}, + {OH_NN_STRIDED_SLICE_END_MASK, &StridedSliceBuilder::SetEndMask}, + {OH_NN_STRIDED_SLICE_ELLIPSIS_MASK, &StridedSliceBuilder::SetEllipsisMask}, + {OH_NN_STRIDED_SLICE_NEW_AXIS_MASK, &StridedSliceBuilder::SetNewAxisMask}, + {OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK, &StridedSliceBuilder::SetShrinkAxisMask} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/sub_builder.cpp b/frameworks/native/neural_network_runtime/ops/sub_builder.cpp index a298235..bb7b50a 100644 --- a/frameworks/native/neural_network_runtime/ops/sub_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/sub_builder.cpp @@ -92,13 +92,11 @@ OH_NN_ReturnCode SubBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_SUB_ACTIVATIONTYPE: - returnCode = SetActivationType(tensor); - break; - default: - LOGE("[SubBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[SubBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/sub_builder.h b/frameworks/native/neural_network_runtime/ops/sub_builder.h index 6e638a5..5fd944a 100644 --- a/frameworks/native/neural_network_runtime/ops/sub_builder.h +++ b/frameworks/native/neural_network_runtime/ops/sub_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class SubBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(SubBuilder::*FuncPtr)(std::shared_ptr); + SubBuilder(); ~SubBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -40,6 +42,9 @@ private: private: mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + std::unordered_map m_paramMap = { + {OH_NN_SUB_ACTIVATIONTYPE, &SubBuilder::SetActivationType} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/tile_builder.cpp b/frameworks/native/neural_network_runtime/ops/tile_builder.cpp index 11ae5ea..1854639 100644 --- a/frameworks/native/neural_network_runtime/ops/tile_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/tile_builder.cpp @@ -85,14 +85,13 @@ OH_NN_ReturnCode TileBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_TILE_DIMS: - returnCode = SetDims(tensor); - break; - default: - LOGE("[TileBuilder] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[TileBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[TileBuilder] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/tile_builder.h b/frameworks/native/neural_network_runtime/ops/tile_builder.h index e504403..0d5fcf4 100644 --- a/frameworks/native/neural_network_runtime/ops/tile_builder.h +++ b/frameworks/native/neural_network_runtime/ops/tile_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class TileBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(TileBuilder::*FuncPtr)(std::shared_ptr); + TileBuilder(); ~TileBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -38,6 +40,9 @@ private: private: std::vector m_dims {0}; + std::unordered_map m_paramMap = { + {OH_NN_TILE_DIMS, &TileBuilder::SetDims} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/top_k_builder.cpp b/frameworks/native/neural_network_runtime/ops/top_k_builder.cpp index 0300403..af81e8d 100644 --- a/frameworks/native/neural_network_runtime/ops/top_k_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/top_k_builder.cpp @@ -103,16 +103,11 @@ OH_NN_ReturnCode TopKBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; 
tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_TOP_K_SORTED: - returnCode = SetSorted(tensor); - break; - case OH_NN_TOP_K_AXIS: - returnCode = SetAxis(tensor); - break; - default: - LOGE("[TopK] Parameter Type is invalid. type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[TopK] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/top_k_builder.h b/frameworks/native/neural_network_runtime/ops/top_k_builder.h index 4c8ccf5..f8d3c80 100644 --- a/frameworks/native/neural_network_runtime/ops/top_k_builder.h +++ b/frameworks/native/neural_network_runtime/ops/top_k_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class TopKBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(TopKBuilder::*FuncPtr)(std::shared_ptr); + TopKBuilder(); ~TopKBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -39,6 +41,10 @@ private: private: int64_t m_axis {0}; bool m_sorted {true}; // true means sorting in the descending order. + std::unordered_map m_paramMap = { + {OH_NN_TOP_K_SORTED, &TopKBuilder::SetSorted}, + {OH_NN_TOP_K_AXIS, &TopKBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.cpp b/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.cpp index 25d0231..8aa5358 100644 --- a/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.cpp @@ -81,13 +81,11 @@ OH_NN_ReturnCode UnsqueezeBuilder::Build(const std::vector& paramsInde for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_UNSQUEEZE_AXIS: - returnCode = SetAxis(tensor); - break; - default: - LOGE("[UnsqueezeBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[UnsqueezeBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.h b/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.h index 08a5ad2..36381ee 100644 --- a/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.h +++ b/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class UnsqueezeBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(UnsqueezeBuilder::*FuncPtr)(std::shared_ptr); + UnsqueezeBuilder(); ~UnsqueezeBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -38,6 +40,9 @@ private: private: std::vector m_axis; + std::unordered_map m_paramMap = { + {OH_NN_UNSQUEEZE_AXIS, &UnsqueezeBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp b/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp index 4a0aea9..575759c 100755 --- a/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp @@ -97,13 +97,11 @@ OH_NN_ReturnCode UnstackBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_UNSTACK_AXIS: - returnCode = SetAxis(tensor); - break; - default: - LOGE("[Unstack] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Unstack] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/unstack_builder.h b/frameworks/native/neural_network_runtime/ops/unstack_builder.h index 40a23f5..ad33430 100755 --- a/frameworks/native/neural_network_runtime/ops/unstack_builder.h +++ b/frameworks/native/neural_network_runtime/ops/unstack_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class UnstackBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode(UnstackBuilder::*FuncPtr)(std::shared_ptr); + UnstackBuilder(); ~UnstackBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -40,6 +42,9 @@ private: private: int64_t m_axis {0}; + std::unordered_map m_paramMap = { + {OH_NN_UNSTACK_AXIS, &UnstackBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime -- Gitee From c083487844b080af92fc4264a44e981621f00182 Mon Sep 17 00:00:00 2001 From: wang-yangsong Date: Tue, 16 Apr 2024 16:29:28 +0800 Subject: [PATCH 6/6] bugfix Signed-off-by: wang-yangsong --- .../ops/add_builder.cpp | 2 +- .../neural_network_runtime/ops/add_builder.h | 5 +++-- .../ops/all_builder.cpp | 2 +- .../neural_network_runtime/ops/all_builder.h | 5 ++--- .../ops/argmax_builder.cpp | 8 +++---- .../ops/argmax_builder.h | 10 ++++----- .../ops/assert_builder.cpp | 2 +- .../ops/assert_builder.h | 4 ++-- 
.../ops/batch_to_space_nd_builder.cpp | 4 ++-- .../ops/batch_to_space_nd_builder.h | 6 ++--- .../ops/batchnorm_builder.cpp | 2 +- .../ops/batchnorm_builder.h | 4 ++-- .../ops/broadcast_to_builder.cpp | 2 +- .../ops/broadcast_to_builder.h | 4 ++-- .../ops/clip_builder.cpp | 4 ++-- .../neural_network_runtime/ops/clip_builder.h | 6 ++--- .../ops/concat_builder.cpp | 2 +- .../ops/concat_builder.h | 4 ++-- .../ops/constant_of_shape_builder.cpp | 4 ++-- .../ops/constant_of_shape_builder.h | 6 ++--- .../ops/conv2d_builder.cpp | 10 ++++----- .../ops/conv2d_builder.h | 12 +++++----- .../ops/conv2d_transpose_builder.cpp | 12 +++++----- .../ops/conv2d_transpose_builder.h | 14 ++++++------ .../ops/crop_builder.cpp | 4 ++-- .../neural_network_runtime/ops/crop_builder.h | 6 ++--- .../ops/depth_to_space_builder.cpp | 4 ++-- .../ops/depth_to_space_builder.h | 6 ++--- .../ops/depthwise_conv2d_native_builder.cpp | 11 +++++----- .../ops/depthwise_conv2d_native_builder.h | 12 +++++----- .../ops/detection_post_process_builder.cpp | 20 ++++++++--------- .../ops/detection_post_process_builder.h | 22 +++++++++---------- .../ops/div_builder.cpp | 2 +- .../neural_network_runtime/ops/div_builder.h | 4 ++-- .../ops/eltwise_builder.cpp | 2 +- .../ops/eltwise_builder.h | 4 ++-- .../ops/exp_builder.cpp | 6 ++--- .../neural_network_runtime/ops/exp_builder.h | 8 +++---- .../ops/flatten_builder.cpp | 2 +- .../ops/flatten_builder.h | 4 ++-- .../ops/fullconnection_builder.cpp | 8 +++---- .../ops/fullconnection_builder.h | 10 ++++----- .../ops/gelu_builder.cpp | 2 +- .../neural_network_runtime/ops/gelu_builder.h | 4 ++-- .../ops/instance_norm_builder.cpp | 2 +- .../ops/instance_norm_builder.h | 4 ++-- .../ops/l2_normalize_builder.cpp | 6 ++--- .../ops/l2_normalize_builder.h | 8 +++---- .../ops/layernorm_builder.cpp | 6 ++--- .../ops/layernorm_builder.h | 8 +++---- .../ops/leaky_relu_builder.cpp | 2 +- .../ops/leaky_relu_builder.h | 4 ++-- .../ops/log_softmax_builder.cpp | 2 +- .../ops/log_softmax_builder.h | 4 ++-- .../ops/lrn_builder.cpp | 10 ++++----- .../neural_network_runtime/ops/lrn_builder.h | 12 +++++----- .../ops/lstm_builder.cpp | 20 ++++++++--------- .../neural_network_runtime/ops/lstm_builder.h | 22 +++++++++---------- .../ops/matmul_builder.cpp | 6 ++--- .../ops/matmul_builder.h | 8 +++---- .../ops/mul_builder.cpp | 2 +- .../neural_network_runtime/ops/mul_builder.h | 4 ++-- .../ops/onehot_builder.cpp | 2 +- .../ops/onehot_builder.h | 4 ++-- .../ops/pad_builder.cpp | 4 ++-- .../neural_network_runtime/ops/pad_builder.h | 6 ++--- .../ops/pooling_builder.cpp | 12 +++++----- .../ops/pooling_builder.h | 15 ++++++------- .../ops/pow_builder.cpp | 4 ++-- .../neural_network_runtime/ops/pow_builder.h | 6 ++--- .../ops/quant_dtype_cast_builder.cpp | 6 ++--- .../ops/quant_dtype_cast_builder.h | 8 +++---- .../ops/range_builder.cpp | 6 ++--- .../ops/range_builder.h | 8 +++---- .../ops/reduceL2_builder.cpp | 6 ++--- .../ops/reduceL2_builder.h | 8 +++---- .../ops/reduceall_builder.cpp | 6 ++--- .../ops/reduceall_builder.h | 8 +++---- .../ops/reducemax_builder.cpp | 6 ++--- .../ops/reducemax_builder.h | 8 +++---- .../ops/reducemean_builder.cpp | 6 ++--- .../ops/reducemean_builder.h | 8 +++---- .../ops/reducemin_builder.cpp | 6 ++--- .../ops/reducemin_builder.h | 8 +++---- .../ops/reduceprod_builder.cpp | 6 ++--- .../ops/reduceprod_builder.h | 8 +++---- .../ops/reducesum_builder.cpp | 6 ++--- .../ops/reducesum_builder.h | 8 +++---- .../ops/resize_bilinear_builder.cpp | 10 ++++----- .../ops/resize_bilinear_builder.h | 12 +++++----- 
.../ops/scale_builder.cpp | 4 ++-- .../ops/scale_builder.h | 6 ++--- .../ops/slice_builder.cpp | 2 +- .../ops/slice_builder.h | 4 ++-- .../ops/softmax_builder.cpp | 2 +- .../ops/softmax_builder.h | 4 ++-- .../ops/space_to_batch_nd_builder.cpp | 6 ++--- .../ops/space_to_batch_nd_builder.h | 8 +++---- .../ops/space_to_depth_builder.cpp | 2 +- .../ops/space_to_depth_builder.h | 4 ++-- .../ops/split_builder.cpp | 6 ++--- .../ops/split_builder.h | 8 +++---- .../ops/squeeze_builder.cpp | 2 +- .../ops/squeeze_builder.h | 4 ++-- .../ops/stack_builder.cpp | 2 +- .../ops/stack_builder.h | 4 ++-- .../ops/strided_slice_builder.cpp | 10 ++++----- .../ops/strided_slice_builder.h | 12 +++++----- .../ops/sub_builder.cpp | 2 +- .../neural_network_runtime/ops/sub_builder.h | 4 ++-- .../ops/tile_builder.cpp | 2 +- .../neural_network_runtime/ops/tile_builder.h | 4 ++-- .../ops/top_k_builder.cpp | 4 ++-- .../ops/top_k_builder.h | 6 ++--- .../ops/unsqueeze_builder.cpp | 2 +- .../ops/unsqueeze_builder.h | 4 ++-- .../ops/unstack_builder.cpp | 2 +- .../ops/unstack_builder.h | 4 ++-- 118 files changed, 364 insertions(+), 366 deletions(-) diff --git a/frameworks/native/neural_network_runtime/ops/add_builder.cpp b/frameworks/native/neural_network_runtime/ops/add_builder.cpp index 3b5b6e3..73a6f95 100644 --- a/frameworks/native/neural_network_runtime/ops/add_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/add_builder.cpp @@ -30,7 +30,7 @@ AddBuilder::AddBuilder() {} AddBuilder::~AddBuilder() {} -OH_NN_ReturnCode AddBuilder::SetActivation(std::shared_ptr tensor) +OH_NN_ReturnCode AddBuilder::SetActivation(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); diff --git a/frameworks/native/neural_network_runtime/ops/add_builder.h b/frameworks/native/neural_network_runtime/ops/add_builder.h index 067ab73..59f6179 100644 --- a/frameworks/native/neural_network_runtime/ops/add_builder.h +++ b/frameworks/native/neural_network_runtime/ops/add_builder.h @@ -20,12 +20,13 @@ #include "ops_builder.h" #include "ops_registry.h" + namespace OHOS { namespace NeuralNetworkRuntime { namespace Ops { class AddBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(AddBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (AddBuilder::*FuncPtr)(const std::shared_ptr&); AddBuilder(); ~AddBuilder() override; @@ -37,7 +38,7 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetActivation(std::shared_ptr tensor); + OH_NN_ReturnCode SetActivation(const std::shared_ptr& tensor); private: mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; diff --git a/frameworks/native/neural_network_runtime/ops/all_builder.cpp b/frameworks/native/neural_network_runtime/ops/all_builder.cpp index bdeb03c..e2432a4 100644 --- a/frameworks/native/neural_network_runtime/ops/all_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/all_builder.cpp @@ -28,7 +28,7 @@ AllBuilder::AllBuilder() {} AllBuilder::~AllBuilder() {} -OH_NN_ReturnCode AllBuilder::SetKeepDims(std::shared_ptr tensor) +OH_NN_ReturnCode AllBuilder::SetKeepDims(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[All] The keep_dims should be type OH_NN_INT64."); diff --git a/frameworks/native/neural_network_runtime/ops/all_builder.h b/frameworks/native/neural_network_runtime/ops/all_builder.h index 0c1a006..0aff05b 100644 --- a/frameworks/native/neural_network_runtime/ops/all_builder.h +++ 
b/frameworks/native/neural_network_runtime/ops/all_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class AllBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(AllBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (AllBuilder::*FuncPtr)(const std::shared_ptr&); AllBuilder(); ~AllBuilder() override; @@ -38,14 +38,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + OH_NN_ReturnCode SetKeepDims(const std::shared_ptr& tensor); private: int64_t m_keepDims {0}; std::unordered_map m_paramMap = { {OH_NN_ALL_KEEP_DIMS, &AllBuilder::SetKeepDims} }; - }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/argmax_builder.cpp b/frameworks/native/neural_network_runtime/ops/argmax_builder.cpp index 8e032a3..7adae29 100644 --- a/frameworks/native/neural_network_runtime/ops/argmax_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/argmax_builder.cpp @@ -27,7 +27,7 @@ ArgMaxBuilder::ArgMaxBuilder() {} ArgMaxBuilder::~ArgMaxBuilder() {} -OH_NN_ReturnCode ArgMaxBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode ArgMaxBuilder::SetAxis(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -46,7 +46,7 @@ OH_NN_ReturnCode ArgMaxBuilder::SetAxis(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ArgMaxBuilder::SetTopK(std::shared_ptr tensor) +OH_NN_ReturnCode ArgMaxBuilder::SetTopK(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -65,7 +65,7 @@ OH_NN_ReturnCode ArgMaxBuilder::SetTopK(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ArgMaxBuilder::SetKeepdims(std::shared_ptr tensor) +OH_NN_ReturnCode ArgMaxBuilder::SetKeepdims(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -84,7 +84,7 @@ OH_NN_ReturnCode ArgMaxBuilder::SetKeepdims(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ArgMaxBuilder::SetOutMaxValue(std::shared_ptr tensor) +OH_NN_ReturnCode ArgMaxBuilder::SetOutMaxValue(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); diff --git a/frameworks/native/neural_network_runtime/ops/argmax_builder.h b/frameworks/native/neural_network_runtime/ops/argmax_builder.h index 0e7992e..9d4c7ed 100644 --- a/frameworks/native/neural_network_runtime/ops/argmax_builder.h +++ b/frameworks/native/neural_network_runtime/ops/argmax_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class ArgMaxBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(ArgMaxBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (ArgMaxBuilder::*FuncPtr)(const std::shared_ptr&); ArgMaxBuilder(); ~ArgMaxBuilder() override; @@ -37,10 +37,10 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); - OH_NN_ReturnCode SetKeepdims(std::shared_ptr tensor); - OH_NN_ReturnCode SetTopK(std::shared_ptr tensor); - OH_NN_ReturnCode SetOutMaxValue(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetKeepdims(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetTopK(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetOutMaxValue(const std::shared_ptr& tensor); private: int64_t m_axis {-1}; diff --git a/frameworks/native/neural_network_runtime/ops/assert_builder.cpp b/frameworks/native/neural_network_runtime/ops/assert_builder.cpp index 5f50532..28c39eb 100644 --- 
a/frameworks/native/neural_network_runtime/ops/assert_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/assert_builder.cpp @@ -28,7 +28,7 @@ AssertBuilder::AssertBuilder() {} AssertBuilder::~AssertBuilder() {} -OH_NN_ReturnCode AssertBuilder::SetSummarize(std::shared_ptr tensor) +OH_NN_ReturnCode AssertBuilder::SetSummarize(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[Assert] The summarize should be type OH_NN_INT64."); diff --git a/frameworks/native/neural_network_runtime/ops/assert_builder.h b/frameworks/native/neural_network_runtime/ops/assert_builder.h index 062aea0..5f2b408 100644 --- a/frameworks/native/neural_network_runtime/ops/assert_builder.h +++ b/frameworks/native/neural_network_runtime/ops/assert_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class AssertBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(AssertBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (AssertBuilder::*FuncPtr)(const std::shared_ptr&); AssertBuilder(); ~AssertBuilder() override; @@ -38,7 +38,7 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetSummarize(std::shared_ptr tensor); + OH_NN_ReturnCode SetSummarize(const std::shared_ptr& tensor); private: int64_t m_summarize {0}; diff --git a/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.cpp b/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.cpp index a96239b..84ae89f 100644 --- a/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.cpp @@ -29,7 +29,7 @@ BatchToSpaceNDBuilder::BatchToSpaceNDBuilder() {} BatchToSpaceNDBuilder::~BatchToSpaceNDBuilder() {} -OH_NN_ReturnCode BatchToSpaceNDBuilder::SetInputBlock(std::shared_ptr tensor) +OH_NN_ReturnCode BatchToSpaceNDBuilder::SetInputBlock(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -53,7 +53,7 @@ OH_NN_ReturnCode BatchToSpaceNDBuilder::SetInputBlock(std::shared_ptr return OH_NN_SUCCESS; } -OH_NN_ReturnCode BatchToSpaceNDBuilder::SetInputCrops(std::shared_ptr tensor) +OH_NN_ReturnCode BatchToSpaceNDBuilder::SetInputCrops(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); diff --git a/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.h b/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.h index 4e10a5f..c93c8ec 100644 --- a/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.h +++ b/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class BatchToSpaceNDBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(BatchToSpaceNDBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (BatchToSpaceNDBuilder::*FuncPtr)(const std::shared_ptr&); BatchToSpaceNDBuilder(); ~BatchToSpaceNDBuilder() override; @@ -41,8 +41,8 @@ private: OH_NN_ReturnCode SetBatchToSpaceInput(const std::vector& inputsIndex, const std::vector& outputsIndex, const std::vector>& allTensors); - OH_NN_ReturnCode SetInputBlock(std::shared_ptr tensor); - OH_NN_ReturnCode SetInputCrops(std::shared_ptr tensor); + OH_NN_ReturnCode SetInputBlock(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetInputCrops(const std::shared_ptr& tensor); private: std::vector m_blockSize; diff --git a/frameworks/native/neural_network_runtime/ops/batchnorm_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/batchnorm_builder.cpp index 2364b20..d671490 100644 --- a/frameworks/native/neural_network_runtime/ops/batchnorm_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/batchnorm_builder.cpp @@ -32,7 +32,7 @@ BatchNormBuilder::BatchNormBuilder() {} BatchNormBuilder::~BatchNormBuilder() {} -OH_NN_ReturnCode BatchNormBuilder::SetEpsilon(std::shared_ptr tensor) +OH_NN_ReturnCode BatchNormBuilder::SetEpsilon(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_FLOAT32) { diff --git a/frameworks/native/neural_network_runtime/ops/batchnorm_builder.h b/frameworks/native/neural_network_runtime/ops/batchnorm_builder.h index ee395c0..d630df7 100644 --- a/frameworks/native/neural_network_runtime/ops/batchnorm_builder.h +++ b/frameworks/native/neural_network_runtime/ops/batchnorm_builder.h @@ -23,7 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class BatchNormBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(BatchNormBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (BatchNormBuilder::*FuncPtr)(const std::shared_ptr&); BatchNormBuilder(); ~BatchNormBuilder() override; @@ -34,7 +34,7 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetEpsilon(std::shared_ptr tensor); + OH_NN_ReturnCode SetEpsilon(const std::shared_ptr& tensor); private: float m_epsilon {0.0001f}; diff --git a/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.cpp b/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.cpp index 3fe4664..0c0e8cf 100755 --- a/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.cpp @@ -27,7 +27,7 @@ BroadcastToBuilder::BroadcastToBuilder() {} BroadcastToBuilder::~BroadcastToBuilder() {} -OH_NN_ReturnCode BroadcastToBuilder::SetShape(std::shared_ptr tensor) +OH_NN_ReturnCode BroadcastToBuilder::SetShape(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[BroadcastTo] The shape should be type OH_NN_INT64."); diff --git a/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.h b/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.h index 297b310..25b7783 100755 --- a/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.h +++ b/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class BroadcastToBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(BroadcastToBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (BroadcastToBuilder::*FuncPtr)(const std::shared_ptr&); BroadcastToBuilder(); ~BroadcastToBuilder() override; @@ -38,7 +38,7 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetShape(std::shared_ptr tensor); + OH_NN_ReturnCode SetShape(const std::shared_ptr& tensor); private: std::vector m_shape; diff --git a/frameworks/native/neural_network_runtime/ops/clip_builder.cpp b/frameworks/native/neural_network_runtime/ops/clip_builder.cpp index 766bce4..88c6864 100644 --- a/frameworks/native/neural_network_runtime/ops/clip_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/clip_builder.cpp @@ -28,7 +28,7 @@ ClipBuilder::ClipBuilder() {} ClipBuilder::~ClipBuilder() {} -OH_NN_ReturnCode ClipBuilder::SetMax(std::shared_ptr tensor) +OH_NN_ReturnCode ClipBuilder::SetMax(const std::shared_ptr& tensor) { if 
(tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[Clip] The max should be type OH_NN_FLOAT32."); @@ -50,7 +50,7 @@ OH_NN_ReturnCode ClipBuilder::SetMax(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ClipBuilder::SetMin(std::shared_ptr tensor) +OH_NN_ReturnCode ClipBuilder::SetMin(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[Clip] The min should be type OH_NN_FLOAT32."); diff --git a/frameworks/native/neural_network_runtime/ops/clip_builder.h b/frameworks/native/neural_network_runtime/ops/clip_builder.h index 161a6e7..d869679 100644 --- a/frameworks/native/neural_network_runtime/ops/clip_builder.h +++ b/frameworks/native/neural_network_runtime/ops/clip_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class ClipBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(ClipBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (ClipBuilder::*FuncPtr)(const std::shared_ptr&); ClipBuilder(); ~ClipBuilder() override; @@ -38,8 +38,8 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetMax(std::shared_ptr tensor); - OH_NN_ReturnCode SetMin(std::shared_ptr tensor); + OH_NN_ReturnCode SetMax(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetMin(const std::shared_ptr& tensor); private: float m_max {0.0f}; diff --git a/frameworks/native/neural_network_runtime/ops/concat_builder.cpp b/frameworks/native/neural_network_runtime/ops/concat_builder.cpp index b478514..55fed55 100644 --- a/frameworks/native/neural_network_runtime/ops/concat_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/concat_builder.cpp @@ -28,7 +28,7 @@ ConcatBuilder::ConcatBuilder() {} ConcatBuilder::~ConcatBuilder() {} -OH_NN_ReturnCode ConcatBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode ConcatBuilder::SetAxis(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); diff --git a/frameworks/native/neural_network_runtime/ops/concat_builder.h b/frameworks/native/neural_network_runtime/ops/concat_builder.h index 3d28914..7b5290f 100644 --- a/frameworks/native/neural_network_runtime/ops/concat_builder.h +++ b/frameworks/native/neural_network_runtime/ops/concat_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class ConcatBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(ConcatBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (ConcatBuilder::*FuncPtr)(const std::shared_ptr&); ConcatBuilder(); ~ConcatBuilder() override; @@ -38,7 +38,7 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); OH_NN_ReturnCode SetInputsAndOutputs(const std::vector& inputsIndex, const std::vector& outputsIndex, const std::vector>& allTensors); diff --git a/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.cpp b/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.cpp index aaf73d1..80458bc 100755 --- a/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.cpp @@ -28,7 +28,7 @@ ConstantOfShapeBuilder::ConstantOfShapeBuilder() {} ConstantOfShapeBuilder::~ConstantOfShapeBuilder() {} -OH_NN_ReturnCode ConstantOfShapeBuilder::SetDataType(std::shared_ptr tensor) +OH_NN_ReturnCode ConstantOfShapeBuilder::SetDataType(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) 
{ LOGE("[ConstantOfShape] The dataType should be type OH_NN_INT64."); @@ -50,7 +50,7 @@ OH_NN_ReturnCode ConstantOfShapeBuilder::SetDataType(std::shared_ptr t return OH_NN_SUCCESS; } -OH_NN_ReturnCode ConstantOfShapeBuilder::SetValue(std::shared_ptr tensor) +OH_NN_ReturnCode ConstantOfShapeBuilder::SetValue(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[ConstantOfShape] The value should be type OH_NN_FLOAT32."); diff --git a/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.h b/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.h index 08c3395..3106fc7 100755 --- a/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.h +++ b/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class ConstantOfShapeBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(ConstantOfShapeBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (ConstantOfShapeBuilder::*FuncPtr)(const std::shared_ptr&); ConstantOfShapeBuilder(); ~ConstantOfShapeBuilder() override; @@ -38,8 +38,8 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetDataType(std::shared_ptr tensor); - OH_NN_ReturnCode SetValue(std::shared_ptr tensor); + OH_NN_ReturnCode SetDataType(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetValue(const std::shared_ptr& tensor); private: int64_t m_dataType {0}; diff --git a/frameworks/native/neural_network_runtime/ops/conv2d_builder.cpp b/frameworks/native/neural_network_runtime/ops/conv2d_builder.cpp index 9a1a2b7..0fac254 100644 --- a/frameworks/native/neural_network_runtime/ops/conv2d_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/conv2d_builder.cpp @@ -83,7 +83,7 @@ void Conv2DBuilder::SetKernelSize(const std::vector& inputsIndex, m_kernelSize.emplace_back(weightShape[KERNEL_WEIGHT_INDEX]); } -OH_NN_ReturnCode Conv2DBuilder::SetStrides(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DBuilder::SetStrides(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set Strides @@ -104,7 +104,7 @@ OH_NN_ReturnCode Conv2DBuilder::SetStrides(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DBuilder::SetDilation(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DBuilder::SetDilation(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set Dilation @@ -125,7 +125,7 @@ OH_NN_ReturnCode Conv2DBuilder::SetDilation(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DBuilder::SetPad(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DBuilder::SetPad(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -170,7 +170,7 @@ OH_NN_ReturnCode Conv2DBuilder::SetPad(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DBuilder::SetGroup(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DBuilder::SetGroup(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set Group @@ -194,7 +194,7 @@ OH_NN_ReturnCode Conv2DBuilder::SetGroup(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DBuilder::SetActavitation(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DBuilder::SetActavitation(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); diff --git a/frameworks/native/neural_network_runtime/ops/conv2d_builder.h b/frameworks/native/neural_network_runtime/ops/conv2d_builder.h index 5f74688..610270d 100644 --- 
a/frameworks/native/neural_network_runtime/ops/conv2d_builder.h +++ b/frameworks/native/neural_network_runtime/ops/conv2d_builder.h @@ -25,7 +25,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class Conv2DBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(Conv2DBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (Conv2DBuilder::*FuncPtr)(const std::shared_ptr&); Conv2DBuilder(); ~Conv2DBuilder() override; @@ -44,11 +44,11 @@ private: const std::vector>& allTensors); void SetKernelSize(const std::vector& inputsIndex, const std::vector>& allTensors); - OH_NN_ReturnCode SetStrides(std::shared_ptr tensor); - OH_NN_ReturnCode SetDilation(std::shared_ptr tensor); - OH_NN_ReturnCode SetPad(std::shared_ptr tensor); - OH_NN_ReturnCode SetGroup(std::shared_ptr tensor); - OH_NN_ReturnCode SetActavitation(std::shared_ptr tensor); + OH_NN_ReturnCode SetStrides(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetDilation(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetPad(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetGroup(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetActavitation(const std::shared_ptr& tensor); private: int64_t m_group {1}; diff --git a/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.cpp b/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.cpp index 2b10933..fc090b5 100644 --- a/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.cpp @@ -76,7 +76,7 @@ void Conv2DTransposeBuilder::SetKernelSize(const std::vector& inputsIn m_kernelSize.emplace_back(weightShape[KERNEL_WEIGHT_INDEX]); } -OH_NN_ReturnCode Conv2DTransposeBuilder::SetStrides(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DTransposeBuilder::SetStrides(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set Strides @@ -97,7 +97,7 @@ OH_NN_ReturnCode Conv2DTransposeBuilder::SetStrides(std::shared_ptr te return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DTransposeBuilder::SetDilation(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DTransposeBuilder::SetDilation(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set Dilation @@ -118,7 +118,7 @@ OH_NN_ReturnCode Conv2DTransposeBuilder::SetDilation(std::shared_ptr t return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DTransposeBuilder::SetPad(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DTransposeBuilder::SetPad(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -163,7 +163,7 @@ OH_NN_ReturnCode Conv2DTransposeBuilder::SetPad(std::shared_ptr tensor return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DTransposeBuilder::SetGroup(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DTransposeBuilder::SetGroup(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set Group @@ -187,7 +187,7 @@ OH_NN_ReturnCode Conv2DTransposeBuilder::SetGroup(std::shared_ptr tens return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DTransposeBuilder::SetOutPadding(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DTransposeBuilder::SetOutPadding(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set outputPadding @@ -208,7 +208,7 @@ OH_NN_ReturnCode Conv2DTransposeBuilder::SetOutPadding(std::shared_ptr return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DTransposeBuilder::SetActivation(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DTransposeBuilder::SetActivation(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); diff --git 
a/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.h b/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.h index 809b7f4..b0dc7d8 100644 --- a/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.h +++ b/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.h @@ -25,7 +25,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class Conv2DTransposeBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(Conv2DTransposeBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (Conv2DTransposeBuilder::*FuncPtr)(const std::shared_ptr&); Conv2DTransposeBuilder(); ~Conv2DTransposeBuilder() override; @@ -42,12 +42,12 @@ private: const std::vector>& allTensors); void SetKernelSize(const std::vector& inputsIndex, const std::vector>& allTensors); - OH_NN_ReturnCode SetStrides(std::shared_ptr tensor); - OH_NN_ReturnCode SetDilation(std::shared_ptr tensor); - OH_NN_ReturnCode SetPad(std::shared_ptr tensor); - OH_NN_ReturnCode SetGroup(std::shared_ptr tensor); - OH_NN_ReturnCode SetOutPadding(std::shared_ptr tensor); - OH_NN_ReturnCode SetActivation(std::shared_ptr tensor); + OH_NN_ReturnCode SetStrides(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetDilation(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetPad(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetGroup(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetOutPadding(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetActivation(const std::shared_ptr& tensor); private: int64_t m_group {1}; diff --git a/frameworks/native/neural_network_runtime/ops/crop_builder.cpp b/frameworks/native/neural_network_runtime/ops/crop_builder.cpp index a5a5e34..fe971d7 100644 --- a/frameworks/native/neural_network_runtime/ops/crop_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/crop_builder.cpp @@ -31,7 +31,7 @@ CropBuilder::CropBuilder() {} CropBuilder::~CropBuilder() {} -OH_NN_ReturnCode CropBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode CropBuilder::SetAxis(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[Crop] The axis should be type OH_NN_INT64."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode CropBuilder::SetAxis(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode CropBuilder::SetOffset(std::shared_ptr tensor) +OH_NN_ReturnCode CropBuilder::SetOffset(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[Crop] The offset should be type OH_NN_INT64."); diff --git a/frameworks/native/neural_network_runtime/ops/crop_builder.h b/frameworks/native/neural_network_runtime/ops/crop_builder.h index 125eefd..61ca483 100644 --- a/frameworks/native/neural_network_runtime/ops/crop_builder.h +++ b/frameworks/native/neural_network_runtime/ops/crop_builder.h @@ -23,7 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class CropBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(CropBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (CropBuilder::*FuncPtr)(const std::shared_ptr&); CropBuilder(); ~CropBuilder() override; @@ -35,8 +35,8 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); - OH_NN_ReturnCode SetOffset(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetOffset(const std::shared_ptr& tensor); private: int64_t m_axis {0}; diff --git a/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.cpp index 8c705a1..c09be38 100755 --- a/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.cpp @@ -32,7 +32,7 @@ DepthToSpaceBuilder::DepthToSpaceBuilder() {} DepthToSpaceBuilder::~DepthToSpaceBuilder() {} -OH_NN_ReturnCode DepthToSpaceBuilder::SetBlockSize(std::shared_ptr tensor) +OH_NN_ReturnCode DepthToSpaceBuilder::SetBlockSize(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[DepthToSpace] The blockSize should be type OH_NN_INT64."); @@ -54,7 +54,7 @@ OH_NN_ReturnCode DepthToSpaceBuilder::SetBlockSize(std::shared_ptr ten return OH_NN_SUCCESS; } -OH_NN_ReturnCode DepthToSpaceBuilder::SetMode(std::shared_ptr tensor) +OH_NN_ReturnCode DepthToSpaceBuilder::SetMode(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT32) { LOGE("[DepthToSpace] The mode should be type OH_NN_INT32."); diff --git a/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.h b/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.h index 2fdbc0d..e0d8980 100755 --- a/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.h +++ b/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class DepthToSpaceBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(DepthToSpaceBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (DepthToSpaceBuilder::*FuncPtr)(const std::shared_ptr&); DepthToSpaceBuilder(); ~DepthToSpaceBuilder() override; @@ -38,8 +38,8 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetBlockSize(std::shared_ptr tensor); - OH_NN_ReturnCode SetMode(std::shared_ptr tensor); + OH_NN_ReturnCode SetBlockSize(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetMode(const std::shared_ptr& tensor); private: int64_t m_blockSize {0}; diff --git a/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.cpp b/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.cpp index eab33ca..3e50f1d 100644 --- a/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.cpp @@ -41,7 +41,7 @@ DepthwiseConv2DNativeBuilder::DepthwiseConv2DNativeBuilder() {} DepthwiseConv2DNativeBuilder::~DepthwiseConv2DNativeBuilder() {} -OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetIsPadMode(std::shared_ptr tensor, +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetIsPadMode(const std::shared_ptr& tensor, bool &isPadMode) { if (tensor->GetElementCount() == PAD_MODE_SIZE) { @@ -55,7 +55,7 @@ OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetIsPadMode(std::shared_ptr tensor) +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetActivation(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set ActivationType @@ -101,7 +101,7 @@ OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetKernelSize(const std::vector tensor) +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetStrides(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { @@ -120,7 +120,7 @@ OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetStrides(std::shared_ptr tensor) +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetDilation(const std::shared_ptr& tensor) { 
tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { @@ -140,8 +140,7 @@ OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetDilation(std::shared_ptr tensor) +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetPadModeOrPaddings(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); diff --git a/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.h b/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.h index 60b2e08..ce97986 100644 --- a/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.h +++ b/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.h @@ -25,7 +25,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class DepthwiseConv2DNativeBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(DepthwiseConv2DNativeBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (DepthwiseConv2DNativeBuilder::*FuncPtr)(const std::shared_ptr&); DepthwiseConv2DNativeBuilder(); ~DepthwiseConv2DNativeBuilder() override; @@ -38,14 +38,14 @@ public: private: OH_NN_ReturnCode SetInputAndOutput(const std::vector& inputsIndex, const std::vector& outputsIndex, const std::vector>& allTensors); - OH_NN_ReturnCode SetIsPadMode(std::shared_ptr tensor, + OH_NN_ReturnCode SetIsPadMode(const std::shared_ptr& tensor, bool &isPadMode); - OH_NN_ReturnCode SetPadModeOrPaddings(std::shared_ptr tensor); + OH_NN_ReturnCode SetPadModeOrPaddings(const std::shared_ptr& tensor); OH_NN_ReturnCode SetKernelSize(const std::vector& inputsIndex, const std::vector>& allTensors); - OH_NN_ReturnCode SetDilation(std::shared_ptr tensor); - OH_NN_ReturnCode SetStrides(std::shared_ptr tensor); - OH_NN_ReturnCode SetActivation(std::shared_ptr tensor); + OH_NN_ReturnCode SetDilation(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetStrides(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetActivation(const std::shared_ptr& tensor); private: int64_t m_inChannel{0}; diff --git a/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.cpp b/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.cpp index 0617955..a710d4b 100644 --- a/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.cpp @@ -33,7 +33,7 @@ DetectionPostProcessBuilder::DetectionPostProcessBuilder() {} DetectionPostProcessBuilder::~DetectionPostProcessBuilder() {} -OH_NN_ReturnCode DetectionPostProcessBuilder::SetInputSize(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetInputSize(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[DetectionPostProcess] The inputSize should be type OH_NN_INT64."); @@ -55,7 +55,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetInputSize(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetScale(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[DetectionPostProcess] The scale should be type OH_NN_FLOAT32."); @@ -81,7 +81,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetScale(std::shared_ptr return OH_NN_SUCCESS; } -OH_NN_ReturnCode DetectionPostProcessBuilder::SetNmsIoUThreshold(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetNmsIoUThreshold(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[DetectionPostProcess] The nmsIoUThreshold should be type OH_NN_FLOAT32."); 
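The change repeated throughout these hunks is the same one: builder setters that used to take std::shared_ptr<NNTensor> by value now take const std::shared_ptr<NNTensor>&. A minimal standalone sketch of the difference is below; it uses a stand-in Tensor struct rather than the runtime's NNTensor and only shows that the by-value form copies the shared_ptr (atomic reference-count increment and decrement on every call) while the const-reference form does not.

#include <cstdio>
#include <memory>

struct Tensor { int dataType {0}; };   // stand-in for NNTensor, not the real type

// By value: the shared_ptr is copied on every call, which atomically
// increments and later decrements the reference count.
static void SetByValue(std::shared_ptr<Tensor> tensor)
{
    std::printf("by value:     use_count = %ld\n", static_cast<long>(tensor.use_count()));
}

// By const reference: no copy and no reference-count traffic; the callee can
// still read the tensor but cannot reseat the caller's pointer.
static void SetByConstRef(const std::shared_ptr<Tensor>& tensor)
{
    std::printf("by const ref: use_count = %ld\n", static_cast<long>(tensor.use_count()));
}

int main()
{
    auto tensor = std::make_shared<Tensor>();
    SetByValue(tensor);     // prints use_count = 2: caller plus the parameter copy
    SetByConstRef(tensor);  // prints use_count = 1: no copy was made
    return 0;
}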
@@ -103,7 +103,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetNmsIoUThreshold(std::shared_ptr return OH_NN_SUCCESS; } -OH_NN_ReturnCode DetectionPostProcessBuilder::SetNmsScoreThreshold(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetNmsScoreThreshold(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[DetectionPostProcess] The scoreThreshold should be type OH_NN_FLOAT32."); @@ -125,7 +125,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetNmsScoreThreshold(std::shared_p return OH_NN_SUCCESS; } -OH_NN_ReturnCode DetectionPostProcessBuilder::SetMaxDetections(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetMaxDetections(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[DetectionPostProcess] The maxDetections should be type OH_NN_INT64."); @@ -147,7 +147,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetMaxDetections(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetDetectionsPerClass(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[DetectionPostProcess] The detectionsPerClass should be type OH_NN_INT64."); @@ -169,7 +169,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetDetectionsPerClass(std::shared_ return OH_NN_SUCCESS; } -OH_NN_ReturnCode DetectionPostProcessBuilder::SetMaxClassesPerDetection(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetMaxClassesPerDetection(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[DetectionPostProcess] The maxClassesPerDetection should be type OH_NN_INT64."); @@ -191,7 +191,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetMaxClassesPerDetection(std::sha return OH_NN_SUCCESS; } -OH_NN_ReturnCode DetectionPostProcessBuilder::SetNumClasses(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetNumClasses(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[DetectionPostProcess] The numClasses should be type OH_NN_INT64."); @@ -213,7 +213,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetNumClasses(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetUseRegularNms(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[DetectionPostProcess] The useRegularNms should be type OH_NN_BOOL."); @@ -235,7 +235,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetUseRegularNms(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetOutQuantized(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[DetectionPostProcess] The outQuantized should be type OH_NN_BOOL."); diff --git a/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.h b/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.h index 658b731..505a48f 100644 --- a/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.h +++ b/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.h @@ -24,7 +24,7 @@ namespace Ops { class DetectionPostProcessBuilder : public OpsBuilder { public: typedef DetectionPostProcessBuilder DPPBuilder; - typedef OH_NN_ReturnCode(DPPBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (DPPBuilder::*FuncPtr)(const std::shared_ptr&); DetectionPostProcessBuilder(); ~DetectionPostProcessBuilder() override; @@ -36,16 +36,16 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - 
OH_NN_ReturnCode SetInputSize(std::shared_ptr tensor); - OH_NN_ReturnCode SetScale(std::shared_ptr tensor); - OH_NN_ReturnCode SetNmsIoUThreshold(std::shared_ptr tensor); - OH_NN_ReturnCode SetNmsScoreThreshold(std::shared_ptr tensor); - OH_NN_ReturnCode SetMaxDetections(std::shared_ptr tensor); - OH_NN_ReturnCode SetDetectionsPerClass(std::shared_ptr tensor); - OH_NN_ReturnCode SetMaxClassesPerDetection(std::shared_ptr tensor); - OH_NN_ReturnCode SetNumClasses(std::shared_ptr tensor); - OH_NN_ReturnCode SetUseRegularNms(std::shared_ptr tensor); - OH_NN_ReturnCode SetOutQuantized(std::shared_ptr tensor); + OH_NN_ReturnCode SetInputSize(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetScale(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetNmsIoUThreshold(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetNmsScoreThreshold(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetMaxDetections(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetDetectionsPerClass(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetMaxClassesPerDetection(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetNumClasses(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetUseRegularNms(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetOutQuantized(const std::shared_ptr& tensor); private: int64_t m_inputSize {0}; diff --git a/frameworks/native/neural_network_runtime/ops/div_builder.cpp b/frameworks/native/neural_network_runtime/ops/div_builder.cpp index 4a45571..e57c4a3 100644 --- a/frameworks/native/neural_network_runtime/ops/div_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/div_builder.cpp @@ -31,7 +31,7 @@ DivBuilder::DivBuilder() {} DivBuilder::~DivBuilder() {} -OH_NN_ReturnCode DivBuilder::SetActicationType(std::shared_ptr tensor) +OH_NN_ReturnCode DivBuilder::SetActicationType(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); diff --git a/frameworks/native/neural_network_runtime/ops/div_builder.h b/frameworks/native/neural_network_runtime/ops/div_builder.h index be217a9..9576715 100644 --- a/frameworks/native/neural_network_runtime/ops/div_builder.h +++ b/frameworks/native/neural_network_runtime/ops/div_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class DivBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(DivBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (DivBuilder::*FuncPtr)(const std::shared_ptr&); DivBuilder(); ~DivBuilder() override; @@ -38,7 +38,7 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetActicationType(std::shared_ptr tensor); + OH_NN_ReturnCode SetActicationType(const std::shared_ptr& tensor); private: mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; diff --git a/frameworks/native/neural_network_runtime/ops/eltwise_builder.cpp b/frameworks/native/neural_network_runtime/ops/eltwise_builder.cpp index 9b1e6fa..7e8d40a 100644 --- a/frameworks/native/neural_network_runtime/ops/eltwise_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/eltwise_builder.cpp @@ -28,7 +28,7 @@ EltwiseBuilder::EltwiseBuilder() {} EltwiseBuilder::~EltwiseBuilder() {} -OH_NN_ReturnCode EltwiseBuilder::SetMode(std::shared_ptr tensor) +OH_NN_ReturnCode EltwiseBuilder::SetMode(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); diff --git a/frameworks/native/neural_network_runtime/ops/eltwise_builder.h b/frameworks/native/neural_network_runtime/ops/eltwise_builder.h index 73d9998..9299b8a 100644 --- 
a/frameworks/native/neural_network_runtime/ops/eltwise_builder.h +++ b/frameworks/native/neural_network_runtime/ops/eltwise_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class EltwiseBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(EltwiseBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (EltwiseBuilder::*FuncPtr)(const std::shared_ptr&); EltwiseBuilder(); ~EltwiseBuilder() override; @@ -38,7 +38,7 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetMode(std::shared_ptr tensor); + OH_NN_ReturnCode SetMode(const std::shared_ptr& tensor); private: mindspore::lite::EltwiseMode m_mode {mindspore::lite::ELTWISE_MODE_PROD}; diff --git a/frameworks/native/neural_network_runtime/ops/exp_builder.cpp b/frameworks/native/neural_network_runtime/ops/exp_builder.cpp index 6706ac1..2350380 100755 --- a/frameworks/native/neural_network_runtime/ops/exp_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/exp_builder.cpp @@ -28,7 +28,7 @@ ExpBuilder::ExpBuilder() {} ExpBuilder::~ExpBuilder() {} -OH_NN_ReturnCode ExpBuilder::SetBase(std::shared_ptr tensor) +OH_NN_ReturnCode ExpBuilder::SetBase(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[Exp] The base should be type OH_NN_FLOAT32."); @@ -50,7 +50,7 @@ OH_NN_ReturnCode ExpBuilder::SetBase(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ExpBuilder::SetScale(std::shared_ptr tensor) +OH_NN_ReturnCode ExpBuilder::SetScale(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[Exp] The scale should be type OH_NN_FLOAT32."); @@ -72,7 +72,7 @@ OH_NN_ReturnCode ExpBuilder::SetScale(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ExpBuilder::SetShift(std::shared_ptr tensor) +OH_NN_ReturnCode ExpBuilder::SetShift(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[Exp] The shift should be type OH_NN_FLOAT32."); diff --git a/frameworks/native/neural_network_runtime/ops/exp_builder.h b/frameworks/native/neural_network_runtime/ops/exp_builder.h index efd809e..9f2b64e 100755 --- a/frameworks/native/neural_network_runtime/ops/exp_builder.h +++ b/frameworks/native/neural_network_runtime/ops/exp_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class ExpBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(ExpBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (ExpBuilder::*FuncPtr)(const std::shared_ptr&); ExpBuilder(); ~ExpBuilder() override; @@ -38,9 +38,9 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetBase(std::shared_ptr tensor); - OH_NN_ReturnCode SetScale(std::shared_ptr tensor); - OH_NN_ReturnCode SetShift(std::shared_ptr tensor); + OH_NN_ReturnCode SetBase(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetScale(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetShift(const std::shared_ptr& tensor); private: float m_base {-1.0f}; diff --git a/frameworks/native/neural_network_runtime/ops/flatten_builder.cpp b/frameworks/native/neural_network_runtime/ops/flatten_builder.cpp index ef6c75b..ea58861 100755 --- a/frameworks/native/neural_network_runtime/ops/flatten_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/flatten_builder.cpp @@ -28,7 +28,7 @@ FlattenBuilder::FlattenBuilder() {} FlattenBuilder::~FlattenBuilder() {} -OH_NN_ReturnCode FlattenBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode 
FlattenBuilder::SetAxis(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[Flatten] The axis should be type OH_NN_INT64."); diff --git a/frameworks/native/neural_network_runtime/ops/flatten_builder.h b/frameworks/native/neural_network_runtime/ops/flatten_builder.h index b61261c..57068ac 100755 --- a/frameworks/native/neural_network_runtime/ops/flatten_builder.h +++ b/frameworks/native/neural_network_runtime/ops/flatten_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class FlattenBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(FlattenBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (FlattenBuilder::*FuncPtr)(const std::shared_ptr&); FlattenBuilder(); ~FlattenBuilder() override; @@ -38,7 +38,7 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); private: int64_t m_axis {1}; diff --git a/frameworks/native/neural_network_runtime/ops/fullconnection_builder.cpp b/frameworks/native/neural_network_runtime/ops/fullconnection_builder.cpp index 7dba24a..2ac5602 100644 --- a/frameworks/native/neural_network_runtime/ops/fullconnection_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/fullconnection_builder.cpp @@ -52,7 +52,7 @@ OH_NN_ReturnCode FullConnectionBuilder::SetFullConnectionInput(const std::vector return OH_NN_SUCCESS; } -OH_NN_ReturnCode FullConnectionBuilder::SetHasBias(std::shared_ptr tensor) +OH_NN_ReturnCode FullConnectionBuilder::SetHasBias(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[FullConnection] The hasBias should be type OH_NN_BOOL."); @@ -74,7 +74,7 @@ OH_NN_ReturnCode FullConnectionBuilder::SetHasBias(std::shared_ptr ten return OH_NN_SUCCESS; } -OH_NN_ReturnCode FullConnectionBuilder::SetUseAxis(std::shared_ptr tensor) +OH_NN_ReturnCode FullConnectionBuilder::SetUseAxis(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[FullConnection] The useAxis should be type OH_NN_BOOL."); @@ -103,7 +103,7 @@ OH_NN_ReturnCode FullConnectionBuilder::SetUseAxis(std::shared_ptr ten return OH_NN_SUCCESS; } -OH_NN_ReturnCode FullConnectionBuilder::SetFullConnectionActivation(std::shared_ptr tensor) +OH_NN_ReturnCode FullConnectionBuilder::SetFullConnectionActivation(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set Activation @@ -133,7 +133,7 @@ OH_NN_ReturnCode FullConnectionBuilder::SetFullConnectionActivation(std::shared_ return OH_NN_SUCCESS; } -OH_NN_ReturnCode FullConnectionBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode FullConnectionBuilder::SetAxis(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); diff --git a/frameworks/native/neural_network_runtime/ops/fullconnection_builder.h b/frameworks/native/neural_network_runtime/ops/fullconnection_builder.h index 0a21bdb..c7cbc39 100644 --- a/frameworks/native/neural_network_runtime/ops/fullconnection_builder.h +++ b/frameworks/native/neural_network_runtime/ops/fullconnection_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class FullConnectionBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(FullConnectionBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (FullConnectionBuilder::*FuncPtr)(const std::shared_ptr&); FullConnectionBuilder(); ~FullConnectionBuilder() override; @@ -41,10 +41,10 @@ private: OH_NN_ReturnCode SetFullConnectionInput(const 
std::vector& inputsIndex, const std::vector& outputsIndex, const std::vector>& allTensors); - OH_NN_ReturnCode SetHasBias(std::shared_ptr tensor); - OH_NN_ReturnCode SetUseAxis(std::shared_ptr tensor); - OH_NN_ReturnCode SetFullConnectionActivation(std::shared_ptr tensor); - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetHasBias(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetUseAxis(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetFullConnectionActivation(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); private: bool m_hasBias {false}; diff --git a/frameworks/native/neural_network_runtime/ops/gelu_builder.cpp b/frameworks/native/neural_network_runtime/ops/gelu_builder.cpp index 65ea4bc..34b1eae 100644 --- a/frameworks/native/neural_network_runtime/ops/gelu_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/gelu_builder.cpp @@ -31,7 +31,7 @@ GeluBuilder::GeluBuilder() {} GeluBuilder::~GeluBuilder() {} -OH_NN_ReturnCode GeluBuilder::SetApproximate(std::shared_ptr tensor) +OH_NN_ReturnCode GeluBuilder::SetApproximate(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[GeLU] The approximate should be type OH_NN_BOOL."); diff --git a/frameworks/native/neural_network_runtime/ops/gelu_builder.h b/frameworks/native/neural_network_runtime/ops/gelu_builder.h index 7e2bf6b..920341f 100644 --- a/frameworks/native/neural_network_runtime/ops/gelu_builder.h +++ b/frameworks/native/neural_network_runtime/ops/gelu_builder.h @@ -23,7 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class GeluBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(GeluBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (GeluBuilder::*FuncPtr)(const std::shared_ptr&); GeluBuilder(); ~GeluBuilder() override; @@ -34,7 +34,7 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetApproximate(std::shared_ptr tensor); + OH_NN_ReturnCode SetApproximate(const std::shared_ptr& tensor); private: bool m_approximate {false}; diff --git a/frameworks/native/neural_network_runtime/ops/instance_norm_builder.cpp b/frameworks/native/neural_network_runtime/ops/instance_norm_builder.cpp index 7b7c874..218eb0e 100755 --- a/frameworks/native/neural_network_runtime/ops/instance_norm_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/instance_norm_builder.cpp @@ -28,7 +28,7 @@ InstanceNormBuilder::InstanceNormBuilder() {} InstanceNormBuilder::~InstanceNormBuilder() {} -OH_NN_ReturnCode InstanceNormBuilder::SetEpsilon(std::shared_ptr tensor) +OH_NN_ReturnCode InstanceNormBuilder::SetEpsilon(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[InstanceNorm] The epsilon should be type OH_NN_FLOAT32."); diff --git a/frameworks/native/neural_network_runtime/ops/instance_norm_builder.h b/frameworks/native/neural_network_runtime/ops/instance_norm_builder.h index b645f07..c12629b 100755 --- a/frameworks/native/neural_network_runtime/ops/instance_norm_builder.h +++ b/frameworks/native/neural_network_runtime/ops/instance_norm_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class InstanceNormBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(InstanceNormBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (InstanceNormBuilder::*FuncPtr)(const std::shared_ptr&); InstanceNormBuilder(); ~InstanceNormBuilder() override; @@ -38,7 +38,7 @@ public: LiteGraphPrimitvePtr GetPrimitive() 
override; private: - OH_NN_ReturnCode SetEpsilon(std::shared_ptr tensor); + OH_NN_ReturnCode SetEpsilon(const std::shared_ptr& tensor); private: float m_epsilon {0.0f}; diff --git a/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.cpp b/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.cpp index 9f81876..eddad6c 100644 --- a/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.cpp @@ -32,7 +32,7 @@ L2NormalizeBuilder::L2NormalizeBuilder() {} L2NormalizeBuilder::~L2NormalizeBuilder() {} -OH_NN_ReturnCode L2NormalizeBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode L2NormalizeBuilder::SetAxis(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[L2Normalize] The axis should be type OH_NN_INT64."); @@ -58,7 +58,7 @@ OH_NN_ReturnCode L2NormalizeBuilder::SetAxis(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode L2NormalizeBuilder::SetEpsilon(std::shared_ptr tensor) +OH_NN_ReturnCode L2NormalizeBuilder::SetEpsilon(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[L2Normalize] The epsilon should be type OH_NN_FLOAT32."); @@ -80,7 +80,7 @@ OH_NN_ReturnCode L2NormalizeBuilder::SetEpsilon(std::shared_ptr tensor return OH_NN_SUCCESS; } -OH_NN_ReturnCode L2NormalizeBuilder::SetActivationType(std::shared_ptr tensor) +OH_NN_ReturnCode L2NormalizeBuilder::SetActivationType(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT8) { LOGE("[L2Normalize] SetActivationType failed, the activationType should have type OH_NN_INT8."); diff --git a/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.h b/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.h index 7cffe58..f4d5d8c 100644 --- a/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.h +++ b/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.h @@ -24,7 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class L2NormalizeBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(L2NormalizeBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (L2NormalizeBuilder::*FuncPtr)(const std::shared_ptr&); L2NormalizeBuilder(); ~L2NormalizeBuilder() override; @@ -36,9 +36,9 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); - OH_NN_ReturnCode SetEpsilon(std::shared_ptr tensor); - OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetEpsilon(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetActivationType(const std::shared_ptr& tensor); private: std::vector m_axis; diff --git a/frameworks/native/neural_network_runtime/ops/layernorm_builder.cpp b/frameworks/native/neural_network_runtime/ops/layernorm_builder.cpp index 11d6ca2..26cb483 100644 --- a/frameworks/native/neural_network_runtime/ops/layernorm_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/layernorm_builder.cpp @@ -33,7 +33,7 @@ LayerNormBuilder::LayerNormBuilder() {} LayerNormBuilder::~LayerNormBuilder() {} -OH_NN_ReturnCode LayerNormBuilder::SetBeginNormAxis(std::shared_ptr tensor) +OH_NN_ReturnCode LayerNormBuilder::SetBeginNormAxis(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { @@ -56,7 +56,7 @@ OH_NN_ReturnCode LayerNormBuilder::SetBeginNormAxis(std::shared_ptr te return 
OH_NN_SUCCESS; } -OH_NN_ReturnCode LayerNormBuilder::SetEpsilon(std::shared_ptr tensor) +OH_NN_ReturnCode LayerNormBuilder::SetEpsilon(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_FLOAT32) { @@ -79,7 +79,7 @@ OH_NN_ReturnCode LayerNormBuilder::SetEpsilon(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LayerNormBuilder::SetBeginParamsAxis(std::shared_ptr tensor) +OH_NN_ReturnCode LayerNormBuilder::SetBeginParamsAxis(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { diff --git a/frameworks/native/neural_network_runtime/ops/layernorm_builder.h b/frameworks/native/neural_network_runtime/ops/layernorm_builder.h index 4598955..85708dc 100644 --- a/frameworks/native/neural_network_runtime/ops/layernorm_builder.h +++ b/frameworks/native/neural_network_runtime/ops/layernorm_builder.h @@ -23,7 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class LayerNormBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(LayerNormBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (LayerNormBuilder::*FuncPtr)(const std::shared_ptr&); LayerNormBuilder(); ~LayerNormBuilder() override; @@ -34,9 +34,9 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetBeginNormAxis(std::shared_ptr tensor); - OH_NN_ReturnCode SetEpsilon(std::shared_ptr tensor); - OH_NN_ReturnCode SetBeginParamsAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetBeginNormAxis(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetEpsilon(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetBeginParamsAxis(const std::shared_ptr& tensor); OH_NN_ReturnCode ValidateGammaAndBetaShape(const std::vector& inputsIndex, int64_t beginAxis, const std::vector>& allTensors) const; diff --git a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp index 189bcf3..6f5dd5a 100644 --- a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp @@ -28,7 +28,7 @@ LeakyReluBuilder::LeakyReluBuilder() {} LeakyReluBuilder::~LeakyReluBuilder() {} -OH_NN_ReturnCode LeakyReluBuilder::SetNegativeSlope(std::shared_ptr tensor) +OH_NN_ReturnCode LeakyReluBuilder::SetNegativeSlope(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[LeakyRelu] The negativeSlope should be type OH_NN_FLOAT32."); diff --git a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.h b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.h index 656a84f..f311863 100644 --- a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.h +++ b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class LeakyReluBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(LeakyReluBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (LeakyReluBuilder::*FuncPtr)(const std::shared_ptr&); LeakyReluBuilder(); ~LeakyReluBuilder() override; @@ -38,7 +38,7 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetNegativeSlope(std::shared_ptr tensor); + OH_NN_ReturnCode SetNegativeSlope(const std::shared_ptr& tensor); private: float m_negativeSlope {0.0f}; diff --git a/frameworks/native/neural_network_runtime/ops/log_softmax_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/log_softmax_builder.cpp index f9fea6c..839577e 100644 --- a/frameworks/native/neural_network_runtime/ops/log_softmax_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/log_softmax_builder.cpp @@ -31,7 +31,7 @@ LogSoftmaxBuilder::LogSoftmaxBuilder() {} LogSoftmaxBuilder::~LogSoftmaxBuilder() {} -OH_NN_ReturnCode LogSoftmaxBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode LogSoftmaxBuilder::SetAxis(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[LogSoftmax] The axis should be type OH_NN_INT64."); diff --git a/frameworks/native/neural_network_runtime/ops/log_softmax_builder.h b/frameworks/native/neural_network_runtime/ops/log_softmax_builder.h index e8291e7..4f030bd 100644 --- a/frameworks/native/neural_network_runtime/ops/log_softmax_builder.h +++ b/frameworks/native/neural_network_runtime/ops/log_softmax_builder.h @@ -23,7 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class LogSoftmaxBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(LogSoftmaxBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (LogSoftmaxBuilder::*FuncPtr)(const std::shared_ptr&); LogSoftmaxBuilder(); ~LogSoftmaxBuilder() override; @@ -35,7 +35,7 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); private: int64_t m_axis {0}; diff --git a/frameworks/native/neural_network_runtime/ops/lrn_builder.cpp b/frameworks/native/neural_network_runtime/ops/lrn_builder.cpp index a68e700..ffc6fad 100644 --- a/frameworks/native/neural_network_runtime/ops/lrn_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/lrn_builder.cpp @@ -32,7 +32,7 @@ LRNBuilder::LRNBuilder() {} LRNBuilder::~LRNBuilder() {} -OH_NN_ReturnCode LRNBuilder::SetDepthRadius(std::shared_ptr tensor) +OH_NN_ReturnCode LRNBuilder::SetDepthRadius(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[LRN] The depthRadius should be type OH_NN_INT64."); @@ -54,7 +54,7 @@ OH_NN_ReturnCode LRNBuilder::SetDepthRadius(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LRNBuilder::SetBias(std::shared_ptr tensor) +OH_NN_ReturnCode LRNBuilder::SetBias(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[LRN] The bias should be type OH_NN_FLOAT32."); @@ -76,7 +76,7 @@ OH_NN_ReturnCode LRNBuilder::SetBias(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LRNBuilder::SetAlpha(std::shared_ptr tensor) +OH_NN_ReturnCode LRNBuilder::SetAlpha(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[LRN] The alpha should be type OH_NN_FLOAT32."); @@ -98,7 +98,7 @@ OH_NN_ReturnCode LRNBuilder::SetAlpha(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LRNBuilder::SetBeta(std::shared_ptr tensor) +OH_NN_ReturnCode LRNBuilder::SetBeta(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[LRN] The beta should be type OH_NN_FLOAT32."); @@ -120,7 +120,7 @@ OH_NN_ReturnCode LRNBuilder::SetBeta(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LRNBuilder::SetNormRegion(std::shared_ptr tensor) +OH_NN_ReturnCode LRNBuilder::SetNormRegion(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT32) { LOGE("[LRN] The normRegion should be type OH_NN_INT32."); diff --git a/frameworks/native/neural_network_runtime/ops/lrn_builder.h 
b/frameworks/native/neural_network_runtime/ops/lrn_builder.h index ee04b32..845f114 100644 --- a/frameworks/native/neural_network_runtime/ops/lrn_builder.h +++ b/frameworks/native/neural_network_runtime/ops/lrn_builder.h @@ -23,7 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class LRNBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(LRNBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (LRNBuilder::*FuncPtr)(const std::shared_ptr&); LRNBuilder(); ~LRNBuilder() override; @@ -35,11 +35,11 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetDepthRadius(std::shared_ptr tensor); - OH_NN_ReturnCode SetBias(std::shared_ptr tensor); - OH_NN_ReturnCode SetAlpha(std::shared_ptr tensor); - OH_NN_ReturnCode SetBeta(std::shared_ptr tensor); - OH_NN_ReturnCode SetNormRegion(std::shared_ptr tensor); + OH_NN_ReturnCode SetDepthRadius(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetBias(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetAlpha(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetBeta(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetNormRegion(const std::shared_ptr& tensor); private: int64_t m_depthRadius {0}; diff --git a/frameworks/native/neural_network_runtime/ops/lstm_builder.cpp b/frameworks/native/neural_network_runtime/ops/lstm_builder.cpp index 03986ef..6104dfd 100644 --- a/frameworks/native/neural_network_runtime/ops/lstm_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/lstm_builder.cpp @@ -28,7 +28,7 @@ LSTMBuilder::LSTMBuilder() {} LSTMBuilder::~LSTMBuilder() {} -OH_NN_ReturnCode LSTMBuilder::SetBidirectional(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetBidirectional(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[LSTM] The bidirectional should be type OH_NN_BOOL."); @@ -50,7 +50,7 @@ OH_NN_ReturnCode LSTMBuilder::SetBidirectional(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetHasBias(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetHasBias(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[LSTM] The hasBias should be type OH_NN_BOOL."); @@ -72,7 +72,7 @@ OH_NN_ReturnCode LSTMBuilder::SetHasBias(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetInputSize(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetInputSize(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[LSTM] The inputSize should be type OH_NN_INT64."); @@ -94,7 +94,7 @@ OH_NN_ReturnCode LSTMBuilder::SetInputSize(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetHiddenSize(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetHiddenSize(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[LSTM] The hiddenSize should be type OH_NN_INT64."); @@ -116,7 +116,7 @@ OH_NN_ReturnCode LSTMBuilder::SetHiddenSize(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetNumLayers(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetNumLayers(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[LSTM] The numLayers should be type OH_NN_INT64."); @@ -138,7 +138,7 @@ OH_NN_ReturnCode LSTMBuilder::SetNumLayers(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetNumDirections(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetNumDirections(const std::shared_ptr& tensor) { if 
(tensor->GetDataType() != OH_NN_INT64) { LOGE("[LSTM] The numDirections should be type OH_NN_INT64."); @@ -160,7 +160,7 @@ OH_NN_ReturnCode LSTMBuilder::SetNumDirections(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetDropout(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetDropout(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[LSTM] The dropout should be type OH_NN_FLOAT32."); @@ -182,7 +182,7 @@ OH_NN_ReturnCode LSTMBuilder::SetDropout(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetZoneoutCell(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetZoneoutCell(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[LSTM] The zoneoutCell should be type OH_NN_FLOAT32."); @@ -204,7 +204,7 @@ OH_NN_ReturnCode LSTMBuilder::SetZoneoutCell(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetZoneoutHidden(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetZoneoutHidden(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[LSTM] The zoneoutHidden should be type OH_NN_FLOAT32."); @@ -226,7 +226,7 @@ OH_NN_ReturnCode LSTMBuilder::SetZoneoutHidden(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetProjSize(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetProjSize(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[LSTM] The projSize should be type OH_NN_INT64."); diff --git a/frameworks/native/neural_network_runtime/ops/lstm_builder.h b/frameworks/native/neural_network_runtime/ops/lstm_builder.h index 136a67a..efb00c2 100644 --- a/frameworks/native/neural_network_runtime/ops/lstm_builder.h +++ b/frameworks/native/neural_network_runtime/ops/lstm_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class LSTMBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(LSTMBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (LSTMBuilder::*FuncPtr)(const std::shared_ptr&); LSTMBuilder(); ~LSTMBuilder() override; @@ -37,16 +37,16 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetBidirectional(std::shared_ptr tensor); - OH_NN_ReturnCode SetHasBias(std::shared_ptr tensor); - OH_NN_ReturnCode SetInputSize(std::shared_ptr tensor); - OH_NN_ReturnCode SetHiddenSize(std::shared_ptr tensor); - OH_NN_ReturnCode SetNumLayers(std::shared_ptr tensor); - OH_NN_ReturnCode SetNumDirections(std::shared_ptr tensor); - OH_NN_ReturnCode SetDropout(std::shared_ptr tensor); - OH_NN_ReturnCode SetZoneoutCell(std::shared_ptr tensor); - OH_NN_ReturnCode SetZoneoutHidden(std::shared_ptr tensor); - OH_NN_ReturnCode SetProjSize(std::shared_ptr tensor); + OH_NN_ReturnCode SetBidirectional(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetHasBias(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetInputSize(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetHiddenSize(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetNumLayers(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetNumDirections(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetDropout(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetZoneoutCell(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetZoneoutHidden(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetProjSize(const std::shared_ptr& tensor); OH_NN_ReturnCode ParseParam(const std::vector& paramsIndex, const std::vector>& 
allTensors); diff --git a/frameworks/native/neural_network_runtime/ops/matmul_builder.cpp b/frameworks/native/neural_network_runtime/ops/matmul_builder.cpp index fb042b5..909cf79 100644 --- a/frameworks/native/neural_network_runtime/ops/matmul_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/matmul_builder.cpp @@ -32,7 +32,7 @@ MatmulBuilder::MatmulBuilder() {} MatmulBuilder::~MatmulBuilder() {} -OH_NN_ReturnCode MatmulBuilder::SetTransposeA(std::shared_ptr tensor) +OH_NN_ReturnCode MatmulBuilder::SetTransposeA(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -55,7 +55,7 @@ OH_NN_ReturnCode MatmulBuilder::SetTransposeA(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode MatmulBuilder::SetTransposeB(std::shared_ptr tensor) +OH_NN_ReturnCode MatmulBuilder::SetTransposeB(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -78,7 +78,7 @@ OH_NN_ReturnCode MatmulBuilder::SetTransposeB(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode MatmulBuilder::SetActivationType(std::shared_ptr tensor) +OH_NN_ReturnCode MatmulBuilder::SetActivationType(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { diff --git a/frameworks/native/neural_network_runtime/ops/matmul_builder.h b/frameworks/native/neural_network_runtime/ops/matmul_builder.h index 039d9c2..24d9182 100644 --- a/frameworks/native/neural_network_runtime/ops/matmul_builder.h +++ b/frameworks/native/neural_network_runtime/ops/matmul_builder.h @@ -25,7 +25,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class MatmulBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(MatmulBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (MatmulBuilder::*FuncPtr)(const std::shared_ptr&); MatmulBuilder(); ~MatmulBuilder() override; @@ -36,9 +36,9 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetTransposeA(std::shared_ptr tensor); - OH_NN_ReturnCode SetTransposeB(std::shared_ptr tensor); - OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + OH_NN_ReturnCode SetTransposeA(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetTransposeB(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetActivationType(const std::shared_ptr& tensor); private: mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; diff --git a/frameworks/native/neural_network_runtime/ops/mul_builder.cpp b/frameworks/native/neural_network_runtime/ops/mul_builder.cpp index ff92906..12c97ec 100644 --- a/frameworks/native/neural_network_runtime/ops/mul_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/mul_builder.cpp @@ -32,7 +32,7 @@ MulBuilder::MulBuilder() {} MulBuilder::~MulBuilder() {} -OH_NN_ReturnCode MulBuilder::SetActivationType(std::shared_ptr tensor) +OH_NN_ReturnCode MulBuilder::SetActivationType(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { diff --git a/frameworks/native/neural_network_runtime/ops/mul_builder.h b/frameworks/native/neural_network_runtime/ops/mul_builder.h index b42fedb..977ccba 100644 --- a/frameworks/native/neural_network_runtime/ops/mul_builder.h +++ b/frameworks/native/neural_network_runtime/ops/mul_builder.h @@ -25,7 +25,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class MulBuilder : public OpsBuilder { public: - typedef 
OH_NN_ReturnCode(MulBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (MulBuilder::*FuncPtr)(const std::shared_ptr&); MulBuilder(); ~MulBuilder() override; @@ -36,7 +36,7 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + OH_NN_ReturnCode SetActivationType(const std::shared_ptr& tensor); private: mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; diff --git a/frameworks/native/neural_network_runtime/ops/onehot_builder.cpp b/frameworks/native/neural_network_runtime/ops/onehot_builder.cpp index 5a375fe..7d5fdaf 100644 --- a/frameworks/native/neural_network_runtime/ops/onehot_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/onehot_builder.cpp @@ -31,7 +31,7 @@ OnehotBuilder::OnehotBuilder() {} OnehotBuilder::~OnehotBuilder() {} -OH_NN_ReturnCode OnehotBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode OnehotBuilder::SetAxis(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { diff --git a/frameworks/native/neural_network_runtime/ops/onehot_builder.h b/frameworks/native/neural_network_runtime/ops/onehot_builder.h index ec48f3b..c44d34e 100644 --- a/frameworks/native/neural_network_runtime/ops/onehot_builder.h +++ b/frameworks/native/neural_network_runtime/ops/onehot_builder.h @@ -23,7 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class OnehotBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(OnehotBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (OnehotBuilder::*FuncPtr)(const std::shared_ptr&); OnehotBuilder(); ~OnehotBuilder() override; @@ -34,7 +34,7 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); private: int64_t m_axis {-1}; diff --git a/frameworks/native/neural_network_runtime/ops/pad_builder.cpp b/frameworks/native/neural_network_runtime/ops/pad_builder.cpp index 56a8a63..4d8a2ea 100644 --- a/frameworks/native/neural_network_runtime/ops/pad_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/pad_builder.cpp @@ -37,7 +37,7 @@ PadBuilder::PadBuilder() {} PadBuilder::~PadBuilder() {} -OH_NN_ReturnCode PadBuilder::SetPaddingMode(std::shared_ptr tensor) +OH_NN_ReturnCode PadBuilder::SetPaddingMode(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -70,7 +70,7 @@ OH_NN_ReturnCode PadBuilder::SetPaddingMode(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode PadBuilder::SetConstantValue(std::shared_ptr tensor) +OH_NN_ReturnCode PadBuilder::SetConstantValue(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { diff --git a/frameworks/native/neural_network_runtime/ops/pad_builder.h b/frameworks/native/neural_network_runtime/ops/pad_builder.h index 0ba2c23..c5b58c8 100644 --- a/frameworks/native/neural_network_runtime/ops/pad_builder.h +++ b/frameworks/native/neural_network_runtime/ops/pad_builder.h @@ -24,7 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class PadBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(PadBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (PadBuilder::*FuncPtr)(const std::shared_ptr&); PadBuilder(); ~PadBuilder() override; @@ -35,8 +35,8 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - 
OH_NN_ReturnCode SetConstantValue(std::shared_ptr tensor); - OH_NN_ReturnCode SetPaddingMode(std::shared_ptr tensor); + OH_NN_ReturnCode SetConstantValue(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetPaddingMode(const std::shared_ptr& tensor); private: float m_constantValue {0.0f}; diff --git a/frameworks/native/neural_network_runtime/ops/pooling_builder.cpp b/frameworks/native/neural_network_runtime/ops/pooling_builder.cpp index 2213941..bb3a506 100644 --- a/frameworks/native/neural_network_runtime/ops/pooling_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/pooling_builder.cpp @@ -92,7 +92,7 @@ OH_NN_ReturnCode PoolingBuilder::SetInputAndOutput(const std::vector& return OH_NN_SUCCESS; } -OH_NN_ReturnCode PoolingBuilder::SetKernel(std::shared_ptr tensor) +OH_NN_ReturnCode PoolingBuilder::SetKernel(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set kernelSize @@ -114,7 +114,7 @@ OH_NN_ReturnCode PoolingBuilder::SetKernel(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode PoolingBuilder::SetStrides(std::shared_ptr tensor) +OH_NN_ReturnCode PoolingBuilder::SetStrides(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set Strides @@ -136,7 +136,7 @@ OH_NN_ReturnCode PoolingBuilder::SetStrides(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode PoolingBuilder::SetPadModeOrPaddings(std::shared_ptr tensor) +OH_NN_ReturnCode PoolingBuilder::SetPadModeOrPaddings(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -180,7 +180,7 @@ OH_NN_ReturnCode PoolingBuilder::SetPadModeOrPaddings(std::shared_ptr return OH_NN_SUCCESS; } -OH_NN_ReturnCode PoolingBuilder::SetRoundMode(std::shared_ptr tensor) +OH_NN_ReturnCode PoolingBuilder::SetRoundMode(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -213,7 +213,7 @@ OH_NN_ReturnCode PoolingBuilder::SetRoundMode(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode PoolingBuilder::SetActivation(std::shared_ptr tensor) +OH_NN_ReturnCode PoolingBuilder::SetActivation(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -244,7 +244,7 @@ OH_NN_ReturnCode PoolingBuilder::SetActivation(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode PoolingBuilder::SetGlobal(std::shared_ptr tensor) +OH_NN_ReturnCode PoolingBuilder::SetGlobal(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[PoolingBuilder] The global should be type OH_NN_BOOL."); diff --git a/frameworks/native/neural_network_runtime/ops/pooling_builder.h b/frameworks/native/neural_network_runtime/ops/pooling_builder.h index f4199eb..681685f 100644 --- a/frameworks/native/neural_network_runtime/ops/pooling_builder.h +++ b/frameworks/native/neural_network_runtime/ops/pooling_builder.h @@ -25,7 +25,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class PoolingBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(PoolingBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (PoolingBuilder::*FuncPtr)(const std::shared_ptr&); PoolingBuilder() = default; virtual ~PoolingBuilder() = default; @@ -39,12 +39,12 @@ public: const std::vector& outputsIndex, const std::vector>& allTensors); - OH_NN_ReturnCode SetKernel(std::shared_ptr tensor); - OH_NN_ReturnCode SetStrides(std::shared_ptr tensor); - OH_NN_ReturnCode SetPadModeOrPaddings(std::shared_ptr tensor); - OH_NN_ReturnCode SetRoundMode(std::shared_ptr tensor); - OH_NN_ReturnCode SetActivation(std::shared_ptr tensor); - OH_NN_ReturnCode 
SetGlobal(std::shared_ptr tensor); + OH_NN_ReturnCode SetKernel(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetStrides(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetPadModeOrPaddings(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetRoundMode(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetActivation(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetGlobal(const std::shared_ptr& tensor); protected: std::vector m_kernelSize; @@ -63,7 +63,6 @@ protected: {OH_NN_MAX_POOL_ACTIVATION_TYPE, &PoolingBuilder::SetActivation}, {OH_NN_MAX_POOL_ROUND_MODE, &PoolingBuilder::SetRoundMode}, {OH_NN_MAX_POOL_GLOBAL, &PoolingBuilder::SetGlobal}, - {OH_NN_AVG_POOL_KERNEL_SIZE, &PoolingBuilder::SetKernel}, {OH_NN_AVG_POOL_STRIDE, &PoolingBuilder::SetStrides}, diff --git a/frameworks/native/neural_network_runtime/ops/pow_builder.cpp b/frameworks/native/neural_network_runtime/ops/pow_builder.cpp index 83864c6..fd5d765 100644 --- a/frameworks/native/neural_network_runtime/ops/pow_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/pow_builder.cpp @@ -31,7 +31,7 @@ PowBuilder::PowBuilder() {} PowBuilder::~PowBuilder() {} -OH_NN_ReturnCode PowBuilder::SetScale(std::shared_ptr tensor) +OH_NN_ReturnCode PowBuilder::SetScale(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[Pow] The scale should be type OH_NN_FLOAT32."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode PowBuilder::SetScale(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode PowBuilder::SetShift(std::shared_ptr tensor) +OH_NN_ReturnCode PowBuilder::SetShift(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[Pow] The shift should be type OH_NN_FLOAT32."); diff --git a/frameworks/native/neural_network_runtime/ops/pow_builder.h b/frameworks/native/neural_network_runtime/ops/pow_builder.h index 9c34ed4..78d1ba2 100644 --- a/frameworks/native/neural_network_runtime/ops/pow_builder.h +++ b/frameworks/native/neural_network_runtime/ops/pow_builder.h @@ -23,7 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class PowBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(PowBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (PowBuilder::*FuncPtr)(const std::shared_ptr&); PowBuilder(); ~PowBuilder() override; @@ -35,8 +35,8 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetScale(std::shared_ptr tensor); - OH_NN_ReturnCode SetShift(std::shared_ptr tensor); + OH_NN_ReturnCode SetScale(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetShift(const std::shared_ptr& tensor); private: float m_scale {1.0f}; diff --git a/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.cpp b/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.cpp index ff9044c..c45c146 100644 --- a/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.cpp @@ -30,7 +30,7 @@ QuantDTypeCastBuilder::QuantDTypeCastBuilder() {} QuantDTypeCastBuilder::~QuantDTypeCastBuilder() {} -OH_NN_ReturnCode QuantDTypeCastBuilder::SetSrcT(std::shared_ptr tensor) +OH_NN_ReturnCode QuantDTypeCastBuilder::SetSrcT(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { @@ -48,7 +48,7 @@ OH_NN_ReturnCode QuantDTypeCastBuilder::SetSrcT(std::shared_ptr tensor return OH_NN_SUCCESS; } -OH_NN_ReturnCode QuantDTypeCastBuilder::SetDstT(std::shared_ptr tensor) 
+OH_NN_ReturnCode QuantDTypeCastBuilder::SetDstT(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { @@ -66,7 +66,7 @@ OH_NN_ReturnCode QuantDTypeCastBuilder::SetDstT(std::shared_ptr tensor return OH_NN_SUCCESS; } -OH_NN_ReturnCode QuantDTypeCastBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode QuantDTypeCastBuilder::SetAxis(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { diff --git a/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.h b/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.h index 109df64..0bd1fd2 100644 --- a/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.h +++ b/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.h @@ -23,7 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class QuantDTypeCastBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(QuantDTypeCastBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (QuantDTypeCastBuilder::*FuncPtr)(const std::shared_ptr&); QuantDTypeCastBuilder(); ~QuantDTypeCastBuilder() override; @@ -35,9 +35,9 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetSrcT(std::shared_ptr tensor); - OH_NN_ReturnCode SetDstT(std::shared_ptr tensor); - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetSrcT(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetDstT(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); private: const uint64_t* m_src_t{nullptr}; diff --git a/frameworks/native/neural_network_runtime/ops/range_builder.cpp b/frameworks/native/neural_network_runtime/ops/range_builder.cpp index 2c2a0e9..4484aa9 100755 --- a/frameworks/native/neural_network_runtime/ops/range_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/range_builder.cpp @@ -28,7 +28,7 @@ RangeBuilder::RangeBuilder() {} RangeBuilder::~RangeBuilder() {} -OH_NN_ReturnCode RangeBuilder::SetStart(std::shared_ptr tensor) +OH_NN_ReturnCode RangeBuilder::SetStart(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[Range] The start should be type OH_NN_INT64."); @@ -50,7 +50,7 @@ OH_NN_ReturnCode RangeBuilder::SetStart(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode RangeBuilder::SetLimit(std::shared_ptr tensor) +OH_NN_ReturnCode RangeBuilder::SetLimit(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[Range] The limit should be type OH_NN_INT64."); @@ -72,7 +72,7 @@ OH_NN_ReturnCode RangeBuilder::SetLimit(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode RangeBuilder::SetDelta(std::shared_ptr tensor) +OH_NN_ReturnCode RangeBuilder::SetDelta(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[Range] The delta should be type OH_NN_INT64."); diff --git a/frameworks/native/neural_network_runtime/ops/range_builder.h b/frameworks/native/neural_network_runtime/ops/range_builder.h index 1d4ec40..4ac5ab5 100755 --- a/frameworks/native/neural_network_runtime/ops/range_builder.h +++ b/frameworks/native/neural_network_runtime/ops/range_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class RangeBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(RangeBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (RangeBuilder::*FuncPtr)(const std::shared_ptr&); RangeBuilder(); 
~RangeBuilder() override; @@ -38,9 +38,9 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetStart(std::shared_ptr tensor); - OH_NN_ReturnCode SetLimit(std::shared_ptr tensor); - OH_NN_ReturnCode SetDelta(std::shared_ptr tensor); + OH_NN_ReturnCode SetStart(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetLimit(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetDelta(const std::shared_ptr& tensor); private: int64_t m_start {0}; diff --git a/frameworks/native/neural_network_runtime/ops/reduceL2_builder.cpp b/frameworks/native/neural_network_runtime/ops/reduceL2_builder.cpp index 58fde17..19c7148 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceL2_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reduceL2_builder.cpp @@ -31,7 +31,7 @@ ReduceL2Builder::ReduceL2Builder() {} ReduceL2Builder::~ReduceL2Builder() {} -OH_NN_ReturnCode ReduceL2Builder::SetCoeff(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceL2Builder::SetCoeff(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[ReduceL2] The coeff should be type OH_NN_FLOAT32."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode ReduceL2Builder::SetCoeff(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceL2Builder::SetReduceToEnd(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceL2Builder::SetReduceToEnd(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceL2] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); @@ -76,7 +76,7 @@ OH_NN_ReturnCode ReduceL2Builder::SetReduceToEnd(std::shared_ptr tenso return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceL2Builder::SetKeepDims(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceL2Builder::SetKeepDims(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceL2] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); diff --git a/frameworks/native/neural_network_runtime/ops/reduceL2_builder.h b/frameworks/native/neural_network_runtime/ops/reduceL2_builder.h index 34f786e..5672284 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceL2_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reduceL2_builder.h @@ -23,7 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceL2Builder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(ReduceL2Builder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (ReduceL2Builder::*FuncPtr)(const std::shared_ptr&); ReduceL2Builder(); ~ReduceL2Builder() override; @@ -35,9 +35,9 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); - OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); - OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + OH_NN_ReturnCode SetCoeff(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetReduceToEnd(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetKeepDims(const std::shared_ptr& tensor); private: float m_coeff {0.0f}; diff --git a/frameworks/native/neural_network_runtime/ops/reduceall_builder.cpp b/frameworks/native/neural_network_runtime/ops/reduceall_builder.cpp index 6bca687..81a7cf3 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceall_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reduceall_builder.cpp @@ -31,7 +31,7 @@ ReduceAllBuilder::ReduceAllBuilder() {} ReduceAllBuilder::~ReduceAllBuilder() {} -OH_NN_ReturnCode ReduceAllBuilder::SetCoeff(std::shared_ptr tensor) +OH_NN_ReturnCode 
ReduceAllBuilder::SetCoeff(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[ReduceAll] The coeff should be type OH_NN_FLOAT32."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode ReduceAllBuilder::SetCoeff(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceAllBuilder::SetReduceToEnd(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceAllBuilder::SetReduceToEnd(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceAll] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); @@ -76,7 +76,7 @@ OH_NN_ReturnCode ReduceAllBuilder::SetReduceToEnd(std::shared_ptr tens return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceAllBuilder::SetKeepDims(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceAllBuilder::SetKeepDims(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceAll] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); diff --git a/frameworks/native/neural_network_runtime/ops/reduceall_builder.h b/frameworks/native/neural_network_runtime/ops/reduceall_builder.h index a18f22a..9c48029 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceall_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reduceall_builder.h @@ -23,7 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceAllBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(ReduceAllBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (ReduceAllBuilder::*FuncPtr)(const std::shared_ptr&); ReduceAllBuilder(); ~ReduceAllBuilder() override; @@ -35,9 +35,9 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); - OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); - OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + OH_NN_ReturnCode SetCoeff(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetReduceToEnd(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetKeepDims(const std::shared_ptr& tensor); private: float m_coeff {0.0f}; diff --git a/frameworks/native/neural_network_runtime/ops/reducemax_builder.cpp b/frameworks/native/neural_network_runtime/ops/reducemax_builder.cpp index 9ec85d5..cea4e04 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemax_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reducemax_builder.cpp @@ -31,7 +31,7 @@ ReduceMaxBuilder::ReduceMaxBuilder() {} ReduceMaxBuilder::~ReduceMaxBuilder() {} -OH_NN_ReturnCode ReduceMaxBuilder::SetCoeff(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMaxBuilder::SetCoeff(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[ReduceMax] The coeff should be type OH_NN_FLOAT32."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode ReduceMaxBuilder::SetCoeff(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceMaxBuilder::SetReduceToEnd(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMaxBuilder::SetReduceToEnd(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceMax] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); @@ -76,7 +76,7 @@ OH_NN_ReturnCode ReduceMaxBuilder::SetReduceToEnd(std::shared_ptr tens return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceMaxBuilder::SetKeepDims(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMaxBuilder::SetKeepDims(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceMax] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); diff --git 
a/frameworks/native/neural_network_runtime/ops/reducemax_builder.h b/frameworks/native/neural_network_runtime/ops/reducemax_builder.h index 5f39651..8ba6b11 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemax_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reducemax_builder.h @@ -23,7 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceMaxBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(ReduceMaxBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (ReduceMaxBuilder::*FuncPtr)(const std::shared_ptr&); ReduceMaxBuilder(); ~ReduceMaxBuilder() override; @@ -35,9 +35,9 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); - OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); - OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + OH_NN_ReturnCode SetCoeff(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetReduceToEnd(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetKeepDims(const std::shared_ptr& tensor); private: float m_coeff {0.0f}; diff --git a/frameworks/native/neural_network_runtime/ops/reducemean_builder.cpp b/frameworks/native/neural_network_runtime/ops/reducemean_builder.cpp index 6f6739f..c208d1c 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemean_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reducemean_builder.cpp @@ -31,7 +31,7 @@ ReduceMeanBuilder::ReduceMeanBuilder() {} ReduceMeanBuilder:: ~ReduceMeanBuilder() {} -OH_NN_ReturnCode ReduceMeanBuilder::SetCoeff(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMeanBuilder::SetCoeff(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[ReduceAll] The coeff should be type OH_NN_FLOAT32."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode ReduceMeanBuilder::SetCoeff(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceMeanBuilder::SetReduceToEnd(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMeanBuilder::SetReduceToEnd(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceAll] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); @@ -76,7 +76,7 @@ OH_NN_ReturnCode ReduceMeanBuilder::SetReduceToEnd(std::shared_ptr ten return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceMeanBuilder::SetKeepDims(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMeanBuilder::SetKeepDims(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { diff --git a/frameworks/native/neural_network_runtime/ops/reducemean_builder.h b/frameworks/native/neural_network_runtime/ops/reducemean_builder.h index c1b5026..9d24a74 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemean_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reducemean_builder.h @@ -23,7 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceMeanBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(ReduceMeanBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (ReduceMeanBuilder::*FuncPtr)(const std::shared_ptr&); ReduceMeanBuilder(); ~ReduceMeanBuilder() override; @@ -35,9 +35,9 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); - OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); - OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + OH_NN_ReturnCode SetCoeff(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetReduceToEnd(const std::shared_ptr& 
tensor); + OH_NN_ReturnCode SetKeepDims(const std::shared_ptr& tensor); private: bool m_keepDims{false}; diff --git a/frameworks/native/neural_network_runtime/ops/reducemin_builder.cpp b/frameworks/native/neural_network_runtime/ops/reducemin_builder.cpp index 907a962..65a9229 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemin_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reducemin_builder.cpp @@ -31,7 +31,7 @@ ReduceMinBuilder::ReduceMinBuilder() {} ReduceMinBuilder::~ReduceMinBuilder() {} -OH_NN_ReturnCode ReduceMinBuilder::SetCoeff(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMinBuilder::SetCoeff(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[ReduceMin] The coeff should be type OH_NN_FLOAT32."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode ReduceMinBuilder::SetCoeff(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceMinBuilder::SetReduceToEnd(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMinBuilder::SetReduceToEnd(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceMin] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); @@ -76,7 +76,7 @@ OH_NN_ReturnCode ReduceMinBuilder::SetReduceToEnd(std::shared_ptr tens return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceMinBuilder::SetKeepDims(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMinBuilder::SetKeepDims(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceMin] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); diff --git a/frameworks/native/neural_network_runtime/ops/reducemin_builder.h b/frameworks/native/neural_network_runtime/ops/reducemin_builder.h index 2f0bba3..d55b30a 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemin_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reducemin_builder.h @@ -23,7 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceMinBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(ReduceMinBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (ReduceMinBuilder::*FuncPtr)(const std::shared_ptr&); ReduceMinBuilder(); ~ReduceMinBuilder() override; @@ -35,9 +35,9 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); - OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); - OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + OH_NN_ReturnCode SetCoeff(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetReduceToEnd(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetKeepDims(const std::shared_ptr& tensor); private: float m_coeff {0.0f}; diff --git a/frameworks/native/neural_network_runtime/ops/reduceprod_builder.cpp b/frameworks/native/neural_network_runtime/ops/reduceprod_builder.cpp index 654b9a6..ba8cadd 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceprod_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reduceprod_builder.cpp @@ -31,7 +31,7 @@ ReduceProdBuilder::ReduceProdBuilder() {} ReduceProdBuilder:: ~ReduceProdBuilder() {} -OH_NN_ReturnCode ReduceProdBuilder::SetCoeff(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceProdBuilder::SetCoeff(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[ReduceAll] The coeff should be type OH_NN_FLOAT32."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode ReduceProdBuilder::SetCoeff(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceProdBuilder::SetReduceToEnd(std::shared_ptr 
tensor) +OH_NN_ReturnCode ReduceProdBuilder::SetReduceToEnd(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceAll] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); @@ -76,7 +76,7 @@ OH_NN_ReturnCode ReduceProdBuilder::SetReduceToEnd(std::shared_ptr ten return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceProdBuilder::SetKeepDims(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceProdBuilder::SetKeepDims(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { diff --git a/frameworks/native/neural_network_runtime/ops/reduceprod_builder.h b/frameworks/native/neural_network_runtime/ops/reduceprod_builder.h index b961ad9..9d520f8 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceprod_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reduceprod_builder.h @@ -23,7 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceProdBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(ReduceProdBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (ReduceProdBuilder::*FuncPtr)(const std::shared_ptr&); ReduceProdBuilder(); ~ReduceProdBuilder() override; @@ -35,9 +35,9 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); - OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); - OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + OH_NN_ReturnCode SetCoeff(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetReduceToEnd(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetKeepDims(const std::shared_ptr& tensor); private: bool m_keepDims{false}; diff --git a/frameworks/native/neural_network_runtime/ops/reducesum_builder.cpp b/frameworks/native/neural_network_runtime/ops/reducesum_builder.cpp index 1a9b465..d00c897 100644 --- a/frameworks/native/neural_network_runtime/ops/reducesum_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reducesum_builder.cpp @@ -31,7 +31,7 @@ ReduceSumBuilder::ReduceSumBuilder() {} ReduceSumBuilder::~ReduceSumBuilder() {} -OH_NN_ReturnCode ReduceSumBuilder::SetCoeff(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceSumBuilder::SetCoeff(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[ReduceSum] The coeff should be type OH_NN_FLOAT32."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode ReduceSumBuilder::SetCoeff(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceSumBuilder::SetReduceToEnd(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceSumBuilder::SetReduceToEnd(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceSum] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); @@ -76,7 +76,7 @@ OH_NN_ReturnCode ReduceSumBuilder::SetReduceToEnd(std::shared_ptr tens return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceSumBuilder::SetKeepDims(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceSumBuilder::SetKeepDims(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceSum] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); diff --git a/frameworks/native/neural_network_runtime/ops/reducesum_builder.h b/frameworks/native/neural_network_runtime/ops/reducesum_builder.h index 794628d..7b1fbf8 100644 --- a/frameworks/native/neural_network_runtime/ops/reducesum_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reducesum_builder.h @@ -23,7 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class 
ReduceSumBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(ReduceSumBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (ReduceSumBuilder::*FuncPtr)(const std::shared_ptr&); ReduceSumBuilder(); ~ReduceSumBuilder() override; @@ -35,9 +35,9 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); - OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); - OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + OH_NN_ReturnCode SetCoeff(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetReduceToEnd(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetKeepDims(const std::shared_ptr& tensor); private: float m_coeff {0.0f}; diff --git a/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.cpp b/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.cpp index fed0825..dadc50d 100644 --- a/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.cpp @@ -30,7 +30,7 @@ ResizeBilinearBuilder::ResizeBilinearBuilder() {} ResizeBilinearBuilder::~ResizeBilinearBuilder() {} -OH_NN_ReturnCode ResizeBilinearBuilder::SetNewHeight(std::shared_ptr tensor) +OH_NN_ReturnCode ResizeBilinearBuilder::SetNewHeight(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -53,7 +53,7 @@ OH_NN_ReturnCode ResizeBilinearBuilder::SetNewHeight(std::shared_ptr t return OH_NN_SUCCESS; } -OH_NN_ReturnCode ResizeBilinearBuilder::SetNewWidth(std::shared_ptr tensor) +OH_NN_ReturnCode ResizeBilinearBuilder::SetNewWidth(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -76,7 +76,7 @@ OH_NN_ReturnCode ResizeBilinearBuilder::SetNewWidth(std::shared_ptr te return OH_NN_SUCCESS; } -OH_NN_ReturnCode ResizeBilinearBuilder::SetPreserveAspectRatio(std::shared_ptr tensor) +OH_NN_ReturnCode ResizeBilinearBuilder::SetPreserveAspectRatio(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -99,7 +99,7 @@ OH_NN_ReturnCode ResizeBilinearBuilder::SetPreserveAspectRatio(std::shared_ptr tensor) +OH_NN_ReturnCode ResizeBilinearBuilder::SetCoordinateTransformMode(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -125,7 +125,7 @@ OH_NN_ReturnCode ResizeBilinearBuilder::SetCoordinateTransformMode(std::shared_p return OH_NN_SUCCESS; } -OH_NN_ReturnCode ResizeBilinearBuilder::SetExcludeOutside(std::shared_ptr tensor) +OH_NN_ReturnCode ResizeBilinearBuilder::SetExcludeOutside(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { diff --git a/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.h b/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.h index 25fac57..df495ab 100644 --- a/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.h +++ b/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.h @@ -25,7 +25,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class ResizeBilinearBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(ResizeBilinearBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (ResizeBilinearBuilder::*FuncPtr)(const std::shared_ptr&); ResizeBilinearBuilder(); ~ResizeBilinearBuilder() override; @@ -37,11 +37,11 @@ public: 
LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetNewHeight(std::shared_ptr tensor); - OH_NN_ReturnCode SetNewWidth(std::shared_ptr tensor); - OH_NN_ReturnCode SetPreserveAspectRatio(std::shared_ptr tensor); - OH_NN_ReturnCode SetCoordinateTransformMode(std::shared_ptr tensor); - OH_NN_ReturnCode SetExcludeOutside(std::shared_ptr tensor); + OH_NN_ReturnCode SetNewHeight(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetNewWidth(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetPreserveAspectRatio(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetCoordinateTransformMode(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetExcludeOutside(const std::shared_ptr& tensor); private: mindspore::lite::ResizeMethod m_method {mindspore::lite::RESIZE_METHOD_LINEAR}; diff --git a/frameworks/native/neural_network_runtime/ops/scale_builder.cpp b/frameworks/native/neural_network_runtime/ops/scale_builder.cpp index cecd8b4..86085ef 100644 --- a/frameworks/native/neural_network_runtime/ops/scale_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/scale_builder.cpp @@ -32,7 +32,7 @@ ScaleBuilder::ScaleBuilder() {} ScaleBuilder::~ScaleBuilder() {} -OH_NN_ReturnCode ScaleBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode ScaleBuilder::SetAxis(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { @@ -55,7 +55,7 @@ OH_NN_ReturnCode ScaleBuilder::SetAxis(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ScaleBuilder::SetActivationType(std::shared_ptr tensor) +OH_NN_ReturnCode ScaleBuilder::SetActivationType(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT8) { diff --git a/frameworks/native/neural_network_runtime/ops/scale_builder.h b/frameworks/native/neural_network_runtime/ops/scale_builder.h index da8822f..7a0d552 100644 --- a/frameworks/native/neural_network_runtime/ops/scale_builder.h +++ b/frameworks/native/neural_network_runtime/ops/scale_builder.h @@ -25,7 +25,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class ScaleBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(ScaleBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (ScaleBuilder::*FuncPtr)(const std::shared_ptr&); ScaleBuilder(); ~ScaleBuilder() override; @@ -37,8 +37,8 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); - OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetActivationType(const std::shared_ptr& tensor); private: mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; diff --git a/frameworks/native/neural_network_runtime/ops/slice_builder.cpp b/frameworks/native/neural_network_runtime/ops/slice_builder.cpp index 3ee017f..826c813 100644 --- a/frameworks/native/neural_network_runtime/ops/slice_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/slice_builder.cpp @@ -29,7 +29,7 @@ SliceBuilder::SliceBuilder() {} SliceBuilder::~SliceBuilder() {} -OH_NN_ReturnCode SliceBuilder::SetAxes(std::shared_ptr tensor) +OH_NN_ReturnCode SliceBuilder::SetAxes(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[SliceBuilder] The axes should be type OH_NN_INT64."); diff --git a/frameworks/native/neural_network_runtime/ops/slice_builder.h b/frameworks/native/neural_network_runtime/ops/slice_builder.h 
index b791b7a..0cbebf3 100644 --- a/frameworks/native/neural_network_runtime/ops/slice_builder.h +++ b/frameworks/native/neural_network_runtime/ops/slice_builder.h @@ -24,7 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class SliceBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(SliceBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (SliceBuilder::*FuncPtr)(const std::shared_ptr&); SliceBuilder(); ~SliceBuilder() override; @@ -36,7 +36,7 @@ public: LiteGraphTensorPtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxes(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxes(const std::shared_ptr& tensor); private: std::vector m_axes; diff --git a/frameworks/native/neural_network_runtime/ops/softmax_builder.cpp b/frameworks/native/neural_network_runtime/ops/softmax_builder.cpp index 2e825bb..be45b08 100644 --- a/frameworks/native/neural_network_runtime/ops/softmax_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/softmax_builder.cpp @@ -29,7 +29,7 @@ SoftmaxBuilder::SoftmaxBuilder() {} SoftmaxBuilder::~SoftmaxBuilder() {} -OH_NN_ReturnCode SoftmaxBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode SoftmaxBuilder::SetAxis(const std::shared_ptr& tensor) { // Set Axis if (tensor->GetDataType() != OH_NN_INT64) { diff --git a/frameworks/native/neural_network_runtime/ops/softmax_builder.h b/frameworks/native/neural_network_runtime/ops/softmax_builder.h index 405c69e..a0607e3 100644 --- a/frameworks/native/neural_network_runtime/ops/softmax_builder.h +++ b/frameworks/native/neural_network_runtime/ops/softmax_builder.h @@ -24,7 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class SoftmaxBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(SoftmaxBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (SoftmaxBuilder::*FuncPtr)(const std::shared_ptr&); SoftmaxBuilder(); ~SoftmaxBuilder() override; @@ -36,7 +36,7 @@ public: LiteGraphTensorPtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); private: std::vector m_axis; diff --git a/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.cpp b/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.cpp index 0c20c61..9c94999 100644 --- a/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.cpp @@ -35,7 +35,7 @@ SpaceToBatchNDBuilder::SpaceToBatchNDBuilder() {} SpaceToBatchNDBuilder::~SpaceToBatchNDBuilder() {} -OH_NN_ReturnCode SpaceToBatchNDBuilder::SetBlockShape(std::shared_ptr tensor) +OH_NN_ReturnCode SpaceToBatchNDBuilder::SetBlockShape(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[SpaceToBatchNDBuilder] The 2nd input blockShape should be type OH_NN_INT64."); @@ -68,7 +68,7 @@ OH_NN_ReturnCode SpaceToBatchNDBuilder::SetBlockShape(std::shared_ptr return OH_NN_SUCCESS; } -OH_NN_ReturnCode SpaceToBatchNDBuilder::SetPaddings(std::shared_ptr tensor) +OH_NN_ReturnCode SpaceToBatchNDBuilder::SetPaddings(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[SpaceToBatchNDBuilder] The 3rd input paddings should be type OH_NN_INT64."); @@ -149,7 +149,7 @@ OH_NN_ReturnCode SpaceToBatchNDBuilder::Build(const std::vector& param return OH_NN_SUCCESS; } -OH_NN_ReturnCode SpaceToBatchNDBuilder::SetPadData(std::shared_ptr tensor) +OH_NN_ReturnCode 
SpaceToBatchNDBuilder::SetPadData(const std::shared_ptr& tensor) { paddings.clear(); diff --git a/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.h b/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.h index bfe292d..d4de195 100644 --- a/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.h +++ b/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.h @@ -24,7 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class SpaceToBatchNDBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(SpaceToBatchNDBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (SpaceToBatchNDBuilder::*FuncPtr)(const std::shared_ptr&); SpaceToBatchNDBuilder(); ~SpaceToBatchNDBuilder() override; @@ -36,9 +36,9 @@ public: LiteGraphTensorPtr GetPrimitive() override; private: - OH_NN_ReturnCode SetPadData(std::shared_ptr tensor); - OH_NN_ReturnCode SetBlockShape(std::shared_ptr tensor); - OH_NN_ReturnCode SetPaddings(std::shared_ptr tensor); + OH_NN_ReturnCode SetPadData(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetBlockShape(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetPaddings(const std::shared_ptr& tensor); private: std::vector> paddings; diff --git a/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.cpp b/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.cpp index 50b2a3c..c1c28c6 100644 --- a/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.cpp @@ -33,7 +33,7 @@ SpaceToDepthBuilder::SpaceToDepthBuilder() {} SpaceToDepthBuilder::~SpaceToDepthBuilder() {} -OH_NN_ReturnCode SpaceToDepthBuilder::SetBlockSize(std::shared_ptr tensor) +OH_NN_ReturnCode SpaceToDepthBuilder::SetBlockSize(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[SpaceToDepth] The blockSize should be type OH_NN_INT64."); diff --git a/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.h b/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.h index fafa8aa..370a4a6 100644 --- a/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.h +++ b/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.h @@ -23,7 +23,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class SpaceToDepthBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(SpaceToDepthBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (SpaceToDepthBuilder::*FuncPtr)(const std::shared_ptr&); SpaceToDepthBuilder(); ~SpaceToDepthBuilder() override; @@ -35,7 +35,7 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetBlockSize(std::shared_ptr tensor); + OH_NN_ReturnCode SetBlockSize(const std::shared_ptr& tensor); private: int64_t m_blockSize {0}; diff --git a/frameworks/native/neural_network_runtime/ops/split_builder.cpp b/frameworks/native/neural_network_runtime/ops/split_builder.cpp index 81f80fb..a788009 100644 --- a/frameworks/native/neural_network_runtime/ops/split_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/split_builder.cpp @@ -61,7 +61,7 @@ OH_NN_ReturnCode SplitBuilder::SetInputAndOutput(const std::vector &in return OH_NN_SUCCESS; } -OH_NN_ReturnCode SplitBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode SplitBuilder::SetAxis(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[SplitBuilder] The 4th input axis should be type 
OH_NN_INT64."); @@ -83,7 +83,7 @@ OH_NN_ReturnCode SplitBuilder::SetAxis(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode SplitBuilder::SetOutputNum(std::shared_ptr tensor) +OH_NN_ReturnCode SplitBuilder::SetOutputNum(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[SplitBuilder] The 2nd input outputNum should be type OH_NN_INT64."); @@ -100,7 +100,7 @@ OH_NN_ReturnCode SplitBuilder::SetOutputNum(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode SplitBuilder::SetSizeSplits(std::shared_ptr tensor) +OH_NN_ReturnCode SplitBuilder::SetSizeSplits(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[SplitBuilder] The 3rd input sizeSplit should be type OH_NN_INT64."); diff --git a/frameworks/native/neural_network_runtime/ops/split_builder.h b/frameworks/native/neural_network_runtime/ops/split_builder.h index 4c7cde6..6453b3d 100644 --- a/frameworks/native/neural_network_runtime/ops/split_builder.h +++ b/frameworks/native/neural_network_runtime/ops/split_builder.h @@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class SplitBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(SplitBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (SplitBuilder::*FuncPtr)(const std::shared_ptr&); SplitBuilder(); ~SplitBuilder() override; @@ -41,9 +41,9 @@ private: OH_NN_ReturnCode SetInputAndOutput(const std::vector& inputsIndex, const std::vector& outputsIndex, const std::vector>& allTensors); - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); - OH_NN_ReturnCode SetOutputNum(std::shared_ptr tensor); - OH_NN_ReturnCode SetSizeSplits(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetOutputNum(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetSizeSplits(const std::shared_ptr& tensor); private: int64_t m_output_num {0}; diff --git a/frameworks/native/neural_network_runtime/ops/squeeze_builder.cpp b/frameworks/native/neural_network_runtime/ops/squeeze_builder.cpp index 3e99c5c..18d63fb 100644 --- a/frameworks/native/neural_network_runtime/ops/squeeze_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/squeeze_builder.cpp @@ -29,7 +29,7 @@ SqueezeBuilder::SqueezeBuilder() {} SqueezeBuilder::~SqueezeBuilder() {} -OH_NN_ReturnCode SqueezeBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode SqueezeBuilder::SetAxis(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[SqueezeBuilder] The 2nd input axis should be type OH_NN_INT64."); diff --git a/frameworks/native/neural_network_runtime/ops/squeeze_builder.h b/frameworks/native/neural_network_runtime/ops/squeeze_builder.h index 2c8b999..a611184 100644 --- a/frameworks/native/neural_network_runtime/ops/squeeze_builder.h +++ b/frameworks/native/neural_network_runtime/ops/squeeze_builder.h @@ -24,7 +24,7 @@ namespace NeuralNetworkRuntime { namespace Ops { class SqueezeBuilder : public OpsBuilder { public: - typedef OH_NN_ReturnCode(SqueezeBuilder::*FuncPtr)(std::shared_ptr); + typedef OH_NN_ReturnCode (SqueezeBuilder::*FuncPtr)(const std::shared_ptr&); SqueezeBuilder(); ~SqueezeBuilder() override; @@ -36,7 +36,7 @@ public: LiteGraphTensorPtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); private: std::vector m_axis; diff --git a/frameworks/native/neural_network_runtime/ops/stack_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/stack_builder.cpp
index 2d86a53..fc95848 100644
--- a/frameworks/native/neural_network_runtime/ops/stack_builder.cpp
+++ b/frameworks/native/neural_network_runtime/ops/stack_builder.cpp
@@ -29,7 +29,7 @@ StackBuilder::StackBuilder() {}
 
 StackBuilder::~StackBuilder() {}
 
-OH_NN_ReturnCode StackBuilder::SetAxis(std::shared_ptr tensor)
+OH_NN_ReturnCode StackBuilder::SetAxis(const std::shared_ptr& tensor)
 {
     if (tensor->GetDataType() != OH_NN_INT64) {
         LOGE("[StackBuilder] The last input axis should be type OH_NN_INT64.");
diff --git a/frameworks/native/neural_network_runtime/ops/stack_builder.h b/frameworks/native/neural_network_runtime/ops/stack_builder.h
index ba4ae02..512ddb0 100644
--- a/frameworks/native/neural_network_runtime/ops/stack_builder.h
+++ b/frameworks/native/neural_network_runtime/ops/stack_builder.h
@@ -24,7 +24,7 @@ namespace NeuralNetworkRuntime {
 namespace Ops {
 class StackBuilder : public OpsBuilder {
 public:
-    typedef OH_NN_ReturnCode(StackBuilder::*FuncPtr)(std::shared_ptr);
+    typedef OH_NN_ReturnCode (StackBuilder::*FuncPtr)(const std::shared_ptr&);
 
     StackBuilder();
     ~StackBuilder() override;
@@ -36,7 +36,7 @@ public:
     LiteGraphTensorPtr GetPrimitive() override;
 
 private:
-    OH_NN_ReturnCode SetAxis(std::shared_ptr tensor);
+    OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor);
 
 private:
     int64_t m_axis = {0};
diff --git a/frameworks/native/neural_network_runtime/ops/strided_slice_builder.cpp b/frameworks/native/neural_network_runtime/ops/strided_slice_builder.cpp
index f5e7682..93a1bbf 100644
--- a/frameworks/native/neural_network_runtime/ops/strided_slice_builder.cpp
+++ b/frameworks/native/neural_network_runtime/ops/strided_slice_builder.cpp
@@ -47,7 +47,7 @@ OH_NN_ReturnCode StridedSliceBuilder::SetInputOutput(const std::vector
     return OH_NN_SUCCESS;
 }
 
-OH_NN_ReturnCode StridedSliceBuilder::SetBeginMask(std::shared_ptr tensor)
+OH_NN_ReturnCode StridedSliceBuilder::SetBeginMask(const std::shared_ptr& tensor)
 {
     if (tensor->GetDataType() != OH_NN_INT64) {
         LOGE("[StridedSliceBuilder] The 5th input beginMask should be type HNN_INT64.");
@@ -64,7 +64,7 @@ OH_NN_ReturnCode StridedSliceBuilder::SetBeginMask(std::shared_ptr ten
     return OH_NN_SUCCESS;
 }
 
-OH_NN_ReturnCode StridedSliceBuilder::SetEndMask(std::shared_ptr tensor)
+OH_NN_ReturnCode StridedSliceBuilder::SetEndMask(const std::shared_ptr& tensor)
 {
     if (tensor->GetDataType() != OH_NN_INT64) {
         LOGE("[StridedSliceBuilder] The 6th input endMask should be type HNN_INT64.");
@@ -81,7 +81,7 @@ OH_NN_ReturnCode StridedSliceBuilder::SetEndMask(std::shared_ptr tenso
     return OH_NN_SUCCESS;
 }
 
-OH_NN_ReturnCode StridedSliceBuilder::SetEllipsisMask(std::shared_ptr tensor)
+OH_NN_ReturnCode StridedSliceBuilder::SetEllipsisMask(const std::shared_ptr& tensor)
 {
     if (tensor->GetDataType() != OH_NN_INT64) {
         LOGE("[StridedSliceBuilder] The 7th input ellipsisMask should be type HNN_INT64.");
@@ -98,7 +98,7 @@ OH_NN_ReturnCode StridedSliceBuilder::SetEllipsisMask(std::shared_ptr
     return OH_NN_SUCCESS;
 }
 
-OH_NN_ReturnCode StridedSliceBuilder::SetNewAxisMask(std::shared_ptr tensor)
+OH_NN_ReturnCode StridedSliceBuilder::SetNewAxisMask(const std::shared_ptr& tensor)
 {
     if (tensor->GetDataType() != OH_NN_INT64) {
         LOGE("[StridedSliceBuilder] The 8th input newAxisMask should be type HNN_INT64.");
@@ -115,7 +115,7 @@ OH_NN_ReturnCode StridedSliceBuilder::SetNewAxisMask(std::shared_ptr t
     return OH_NN_SUCCESS;
 }
 
-OH_NN_ReturnCode StridedSliceBuilder::SetShrinkAxisMask(std::shared_ptr tensor)
+OH_NN_ReturnCode StridedSliceBuilder::SetShrinkAxisMask(const std::shared_ptr& tensor)
 {
     if (tensor->GetDataType() != OH_NN_INT64) {
         LOGE("[StridedSliceBuilder] The 9th input shrinkAxisMAsk should be type HNN_INT64.");
diff --git a/frameworks/native/neural_network_runtime/ops/strided_slice_builder.h b/frameworks/native/neural_network_runtime/ops/strided_slice_builder.h
index 23690cb..bd4787d 100644
--- a/frameworks/native/neural_network_runtime/ops/strided_slice_builder.h
+++ b/frameworks/native/neural_network_runtime/ops/strided_slice_builder.h
@@ -24,7 +24,7 @@ namespace NeuralNetworkRuntime {
 namespace Ops {
 class StridedSliceBuilder : public OpsBuilder {
 public:
-    typedef OH_NN_ReturnCode(StridedSliceBuilder::*FuncPtr)(std::shared_ptr);
+    typedef OH_NN_ReturnCode (StridedSliceBuilder::*FuncPtr)(const std::shared_ptr&);
 
     StridedSliceBuilder();
     ~StridedSliceBuilder() override;
@@ -39,11 +39,11 @@ private:
     OH_NN_ReturnCode SetInputOutput(const std::vector& inputsIndex,
                                     const std::vector& outputsIndex,
                                     const std::vector>& allTensors);
-    OH_NN_ReturnCode SetBeginMask(std::shared_ptr tensor);
-    OH_NN_ReturnCode SetEndMask(std::shared_ptr tensor);
-    OH_NN_ReturnCode SetEllipsisMask(std::shared_ptr tensor);
-    OH_NN_ReturnCode SetNewAxisMask(std::shared_ptr tensor);
-    OH_NN_ReturnCode SetShrinkAxisMask(std::shared_ptr tensor);
+    OH_NN_ReturnCode SetBeginMask(const std::shared_ptr& tensor);
+    OH_NN_ReturnCode SetEndMask(const std::shared_ptr& tensor);
+    OH_NN_ReturnCode SetEllipsisMask(const std::shared_ptr& tensor);
+    OH_NN_ReturnCode SetNewAxisMask(const std::shared_ptr& tensor);
+    OH_NN_ReturnCode SetShrinkAxisMask(const std::shared_ptr& tensor);
 
 private:
     int64_t m_begin_mask = {0};
diff --git a/frameworks/native/neural_network_runtime/ops/sub_builder.cpp b/frameworks/native/neural_network_runtime/ops/sub_builder.cpp
index bb7b50a..9ea5f24 100644
--- a/frameworks/native/neural_network_runtime/ops/sub_builder.cpp
+++ b/frameworks/native/neural_network_runtime/ops/sub_builder.cpp
@@ -29,7 +29,7 @@ SubBuilder::SubBuilder() {}
 
 SubBuilder::~SubBuilder() {}
 
-OH_NN_ReturnCode SubBuilder::SetActivationType(std::shared_ptr tensor)
+OH_NN_ReturnCode SubBuilder::SetActivationType(const std::shared_ptr& tensor)
 {
     if (tensor->GetDataType() != OH_NN_INT8) {
         LOGE("[SubBuilder] The 3rd input activation should be type OH_NN_INT8.");
diff --git a/frameworks/native/neural_network_runtime/ops/sub_builder.h b/frameworks/native/neural_network_runtime/ops/sub_builder.h
index 5fd944a..f9ae173 100644
--- a/frameworks/native/neural_network_runtime/ops/sub_builder.h
+++ b/frameworks/native/neural_network_runtime/ops/sub_builder.h
@@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime {
 namespace Ops {
 class SubBuilder : public OpsBuilder {
 public:
-    typedef OH_NN_ReturnCode(SubBuilder::*FuncPtr)(std::shared_ptr);
+    typedef OH_NN_ReturnCode (SubBuilder::*FuncPtr)(const std::shared_ptr&);
 
     SubBuilder();
     ~SubBuilder() override;
@@ -38,7 +38,7 @@ public:
     LiteGraphPrimitvePtr GetPrimitive() override;
 
 private:
-    OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor);
+    OH_NN_ReturnCode SetActivationType(const std::shared_ptr& tensor);
 
 private:
     mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
diff --git a/frameworks/native/neural_network_runtime/ops/tile_builder.cpp b/frameworks/native/neural_network_runtime/ops/tile_builder.cpp
index 1854639..bb6ea1d 100644
--- a/frameworks/native/neural_network_runtime/ops/tile_builder.cpp
+++ b/frameworks/native/neural_network_runtime/ops/tile_builder.cpp
@@ -29,7 +29,7 @@ TileBuilder::TileBuilder() {}
 
 TileBuilder::~TileBuilder() {}
 
-OH_NN_ReturnCode TileBuilder::SetDims(std::shared_ptr tensor)
+OH_NN_ReturnCode TileBuilder::SetDims(const std::shared_ptr& tensor)
 {
     if (tensor->GetDataType() != OH_NN_INT64) {
         LOGE("[TileBuilder] The dims should be type OH_NN_INT64.");
diff --git a/frameworks/native/neural_network_runtime/ops/tile_builder.h b/frameworks/native/neural_network_runtime/ops/tile_builder.h
index 0d5fcf4..e291bbf 100644
--- a/frameworks/native/neural_network_runtime/ops/tile_builder.h
+++ b/frameworks/native/neural_network_runtime/ops/tile_builder.h
@@ -24,7 +24,7 @@ namespace NeuralNetworkRuntime {
 namespace Ops {
 class TileBuilder : public OpsBuilder {
 public:
-    typedef OH_NN_ReturnCode(TileBuilder::*FuncPtr)(std::shared_ptr);
+    typedef OH_NN_ReturnCode (TileBuilder::*FuncPtr)(const std::shared_ptr&);
 
     TileBuilder();
     ~TileBuilder() override;
@@ -36,7 +36,7 @@ public:
     LiteGraphPrimitvePtr GetPrimitive() override;
 
 private:
-    OH_NN_ReturnCode SetDims(std::shared_ptr tensor);
+    OH_NN_ReturnCode SetDims(const std::shared_ptr& tensor);
 
 private:
     std::vector m_dims {0};
diff --git a/frameworks/native/neural_network_runtime/ops/top_k_builder.cpp b/frameworks/native/neural_network_runtime/ops/top_k_builder.cpp
index af81e8d..2972987 100644
--- a/frameworks/native/neural_network_runtime/ops/top_k_builder.cpp
+++ b/frameworks/native/neural_network_runtime/ops/top_k_builder.cpp
@@ -30,7 +30,7 @@ TopKBuilder::TopKBuilder() {}
 
 TopKBuilder::~TopKBuilder() {}
 
-OH_NN_ReturnCode TopKBuilder::SetSorted(std::shared_ptr tensor)
+OH_NN_ReturnCode TopKBuilder::SetSorted(const std::shared_ptr& tensor)
 {
     if (tensor->GetDataType() != OH_NN_BOOL) {
         LOGE("[TopK] The sorted should be type OH_NN_BOOL.");
@@ -47,7 +47,7 @@ OH_NN_ReturnCode TopKBuilder::SetSorted(std::shared_ptr tensor)
     return OH_NN_SUCCESS;
 }
 
-OH_NN_ReturnCode TopKBuilder::SetAxis(std::shared_ptr tensor)
+OH_NN_ReturnCode TopKBuilder::SetAxis(const std::shared_ptr& tensor)
 {
     if (tensor->GetDataType() != OH_NN_INT64) {
         LOGE("[TopK] The axis should be type OH_NN_INT64.");
diff --git a/frameworks/native/neural_network_runtime/ops/top_k_builder.h b/frameworks/native/neural_network_runtime/ops/top_k_builder.h
index f8d3c80..1eeff5c 100644
--- a/frameworks/native/neural_network_runtime/ops/top_k_builder.h
+++ b/frameworks/native/neural_network_runtime/ops/top_k_builder.h
@@ -24,7 +24,7 @@ namespace NeuralNetworkRuntime {
 namespace Ops {
 class TopKBuilder : public OpsBuilder {
 public:
-    typedef OH_NN_ReturnCode(TopKBuilder::*FuncPtr)(std::shared_ptr);
+    typedef OH_NN_ReturnCode (TopKBuilder::*FuncPtr)(const std::shared_ptr&);
 
     TopKBuilder();
     ~TopKBuilder() override;
@@ -35,8 +35,8 @@ public:
     LiteGraphPrimitvePtr GetPrimitive() override;
 
 private:
-    OH_NN_ReturnCode SetSorted(std::shared_ptr tensor);
-    OH_NN_ReturnCode SetAxis(std::shared_ptr tensor);
+    OH_NN_ReturnCode SetSorted(const std::shared_ptr& tensor);
+    OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor);
 
 private:
     int64_t m_axis {0};
diff --git a/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.cpp b/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.cpp
index 8aa5358..a9ddf26 100644
--- a/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.cpp
+++ b/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.cpp
@@ -29,7 +29,7 @@ UnsqueezeBuilder::UnsqueezeBuilder() {}
 
 UnsqueezeBuilder::~UnsqueezeBuilder() {}
 
-OH_NN_ReturnCode UnsqueezeBuilder::SetAxis(std::shared_ptr tensor)
+OH_NN_ReturnCode UnsqueezeBuilder::SetAxis(const std::shared_ptr& tensor)
 {
     // Set Axis
     if (tensor->GetDataType() != OH_NN_INT64) {
diff --git a/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.h b/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.h
index 36381ee..6f14c10 100644
--- a/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.h
+++ b/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.h
@@ -24,7 +24,7 @@ namespace NeuralNetworkRuntime {
 namespace Ops {
 class UnsqueezeBuilder : public OpsBuilder {
 public:
-    typedef OH_NN_ReturnCode(UnsqueezeBuilder::*FuncPtr)(std::shared_ptr);
+    typedef OH_NN_ReturnCode (UnsqueezeBuilder::*FuncPtr)(const std::shared_ptr&);
 
     UnsqueezeBuilder();
     ~UnsqueezeBuilder() override;
@@ -36,7 +36,7 @@ public:
     LiteGraphPrimitvePtr GetPrimitive() override;
 
 private:
-    OH_NN_ReturnCode SetAxis(std::shared_ptr tensor);
+    OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor);
 
 private:
     std::vector m_axis;
diff --git a/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp b/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp
index 575759c..f0d1628 100755
--- a/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp
+++ b/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp
@@ -28,7 +28,7 @@ UnstackBuilder::UnstackBuilder() {}
 
 UnstackBuilder::~UnstackBuilder() {}
 
-OH_NN_ReturnCode UnstackBuilder::SetAxis(std::shared_ptr tensor)
+OH_NN_ReturnCode UnstackBuilder::SetAxis(const std::shared_ptr& tensor)
 {
     if (tensor->GetDataType() != OH_NN_INT64) {
         LOGE("[Unstack] The axis should be type OH_NN_INT64.");
diff --git a/frameworks/native/neural_network_runtime/ops/unstack_builder.h b/frameworks/native/neural_network_runtime/ops/unstack_builder.h
index ad33430..6619733 100755
--- a/frameworks/native/neural_network_runtime/ops/unstack_builder.h
+++ b/frameworks/native/neural_network_runtime/ops/unstack_builder.h
@@ -26,7 +26,7 @@ namespace NeuralNetworkRuntime {
 namespace Ops {
 class UnstackBuilder : public OpsBuilder {
 public:
-    typedef OH_NN_ReturnCode(UnstackBuilder::*FuncPtr)(std::shared_ptr);
+    typedef OH_NN_ReturnCode (UnstackBuilder::*FuncPtr)(const std::shared_ptr&);
 
     UnstackBuilder();
     ~UnstackBuilder() override;
@@ -38,7 +38,7 @@ public:
     LiteGraphPrimitvePtr GetPrimitive() override;
 
 private:
-    OH_NN_ReturnCode SetAxis(std::shared_ptr tensor);
+    OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor);
 
 private:
     int64_t m_axis {0};
-- 
Gitee
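
The change repeated across these hunks is mechanical: every SetXxx setter that took a std::shared_ptr by value now takes it by const reference, and each builder's FuncPtr pointer-to-member typedef is updated to match (with a space restored between the return type and the declarator). The sketch below shows the same pattern in isolation; MiniBuilder, Tensor, and ReturnCode are illustrative stand-ins rather than the runtime's real classes, and the real builders dispatch through their own tables and tensor type (whose template argument is elided in the patch text above). Passing shared_ptr by const reference avoids a copy, and therefore an atomic reference-count update, on every call in which the callee only reads the tensor.

#include <cstdint>
#include <iostream>
#include <memory>
#include <unordered_map>

// Illustrative stand-ins; the runtime defines its own tensor and return-code types.
enum class ReturnCode { SUCCESS, INVALID_PARAMETER };

struct Tensor {
    int64_t value {0};
};

class MiniBuilder {
public:
    // Pointer-to-member typedef in the style the patch switches to:
    // const reference parameter, space after the return type.
    typedef ReturnCode (MiniBuilder::*FuncPtr)(const std::shared_ptr<Tensor>&);

    // Taking the shared_ptr by const reference: no copy, no refcount bump.
    ReturnCode SetAxis(const std::shared_ptr<Tensor>& tensor)
    {
        if (tensor == nullptr) {
            return ReturnCode::INVALID_PARAMETER;
        }
        m_axis = tensor->value;
        return ReturnCode::SUCCESS;
    }

    // Route a parameter to its setter through a pointer-to-member map,
    // mirroring how the builders dispatch their SetXxx calls.
    ReturnCode Build(uint32_t index, const std::shared_ptr<Tensor>& tensor)
    {
        auto it = m_setters.find(index);
        if (it == m_setters.end()) {
            return ReturnCode::INVALID_PARAMETER;
        }
        return (this->*(it->second))(tensor);
    }

    int64_t Axis() const { return m_axis; }

private:
    int64_t m_axis {0};
    std::unordered_map<uint32_t, FuncPtr> m_setters {{0, &MiniBuilder::SetAxis}};
};

int main()
{
    MiniBuilder builder;
    auto axis = std::make_shared<Tensor>(Tensor{2});
    if (builder.Build(0, axis) == ReturnCode::SUCCESS) {
        std::cout << "axis = " << builder.Axis() << std::endl;  // prints: axis = 2
    }
    return 0;
}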