diff --git a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v1_0.cpp b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v1_0.cpp index 2e5ef3ca008b512485031e71374a32f12977d5e2..fe062c4139790691926019968690cb19858d2640 100644 --- a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v1_0.cpp +++ b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v1_0.cpp @@ -877,156 +877,63 @@ std::vector ConvertUnsqueeze(PrimitivePtr primitive) return ret; } +std::unordered_map(*)(PrimitivePtr)> convertOpMap = { + {NODE_TYPE_ACTIVATION, &ConvertActivation}, + {NODE_TYPE_ADD_FUSION, &ConvertAddFusion}, + {NODE_TYPE_ARGMAX_FUSION, &ConvertArgMaxFusion}, + {NODE_TYPE_AVGPOOL_FUSION, &ConvertAvgPoolFusion}, + {NODE_TYPE_BATCH_TO_SPACE_ND, &ConvertBatchToSpaceND}, + {NODE_TYPE_BIAS_ADD, &ConvertBiasAdd}, + {NODE_TYPE_CAST, &ConvertCast}, + {NODE_TYPE_CONCAT, &ConvertConcat}, + {NODE_TYPE_CONV2D_FUSION, &ConvertConv2DFusion}, + {NODE_TYPE_CONV2D_TRANSPOSE_FUSION, &ConvertConv2dTransposeFusion}, + {NODE_TYPE_DIV_FUSION, &ConvertDivFusion}, + {NODE_TYPE_ELTWISE, &ConvertEltwise}, + {NODE_TYPE_EXPAND_DIMS, &ConvertExpandDims}, + {NODE_TYPE_FILL, &ConvertFill}, + {NODE_TYPE_FULL_CONNECTION, &ConvertFullConnection}, + {NODE_TYPE_FUSED_BATCH_NORM, &ConvertFusedBatchNorm}, + {NODE_TYPE_GATHER, &ConvertGather}, + {NODE_TYPE_LAYER_NORM_FUSION, &ConvertLayerNormFusion}, + {NODE_TYPE_LESS_EQUAL, &ConvertLessEqual}, + {NODE_TYPE_MATMUL_FUSION, &ConvertMatMulFusion}, + {NODE_TYPE_MAXIMUM, &ConvertMaximum}, + {NODE_TYPE_MAX_POOL_FUSION, &ConvertMaxPoolFusion}, + {NODE_TYPE_MUL_FUSION, &ConvertMulFusion}, + {NODE_TYPE_ONE_HOT, &ConvertOneHot}, + {NODE_TYPE_PAD_FUSION, &ConvertPadFusion}, + {NODE_TYPE_POW_FUSION, &ConvertPowFusion}, + {NODE_TYPE_PRELU_FUSION, &ConvertPReLUFusion}, + {NODE_TYPE_QUANT_DTYPE_CAST, &ConvertQuantDTypeCast}, + {NODE_TYPE_REDUCE_FUSION, &ConvertReduceFusion}, + {NODE_TYPE_RESHAPE, &ConvertReshape}, + 
{NODE_TYPE_RESIZE, &ConvertResize}, + {NODE_TYPE_RSQRT, &ConvertRsqrt}, + {NODE_TYPE_SCALE_FUSION, &ConvertScaleFusion}, + {NODE_TYPE_SHAPE, &ConvertShape}, + {NODE_TYPE_SLICE_FUSION, &ConvertSliceFusion}, + {NODE_TYPE_SOFTMAX, &ConvertSoftmax}, + {NODE_TYPE_SPACE_TO_BATCH_ND, &ConvertSpaceToBatchND}, + {NODE_TYPE_SPLIT, &ConvertSplit}, + {NODE_TYPE_SQRT, &ConvertSqrt}, + {NODE_TYPE_SQUARED_DIFFERENCE, &ConvertSquaredDifference}, + {NODE_TYPE_SQUEEZE, &ConvertSqueeze}, + {NODE_TYPE_STACK, &ConvertStack}, + {NODE_TYPE_STRIDED_SLICE, &ConvertStridedSlice}, + {NODE_TYPE_SUB_FUSION, &ConvertSubFusion}, + {NODE_TYPE_TILE_FUSION, &ConvertTileFusion}, + {NODE_TYPE_TOPK_FUSION, &ConvertTopKFusion}, + {NODE_TYPE_TRANSPOSE, &ConvertTranspose}, + {NODE_TYPE_UNSQUEEZE, &ConvertUnsqueeze}}; + std::vector Convert(NodeType type, PrimitivePtr primitive) { - switch (type) { - case NODE_TYPE_ACTIVATION: - return ConvertActivation(primitive); - break; - case NODE_TYPE_ADD_FUSION: - return ConvertAddFusion(primitive); - break; - case NODE_TYPE_ARGMAX_FUSION: - return ConvertArgMaxFusion(primitive); - break; - case NODE_TYPE_AVGPOOL_FUSION: - return ConvertAvgPoolFusion(primitive); - break; - case NODE_TYPE_BATCH_TO_SPACE_ND: - return ConvertBatchToSpaceND(primitive); - break; - case NODE_TYPE_BIAS_ADD: - return ConvertBiasAdd(primitive); - break; - case NODE_TYPE_CAST: - return ConvertCast(primitive); - break; - case NODE_TYPE_CONCAT: - return ConvertConcat(primitive); - break; - case NODE_TYPE_CONV2D_FUSION: - return ConvertConv2DFusion(primitive); - break; - case NODE_TYPE_CONV2D_TRANSPOSE_FUSION: - return ConvertConv2dTransposeFusion(primitive); - break; - case NODE_TYPE_DIV_FUSION: - return ConvertDivFusion(primitive); - break; - case NODE_TYPE_ELTWISE: - return ConvertEltwise(primitive); - break; - case NODE_TYPE_EXPAND_DIMS: - return ConvertExpandDims(primitive); - break; - case NODE_TYPE_FILL: - return ConvertFill(primitive); - break; - case NODE_TYPE_FULL_CONNECTION: - return 
ConvertFullConnection(primitive); - break; - case NODE_TYPE_FUSED_BATCH_NORM: - return ConvertFusedBatchNorm(primitive); - break; - case NODE_TYPE_GATHER: - return ConvertGather(primitive); - break; - case NODE_TYPE_LAYER_NORM_FUSION: - return ConvertLayerNormFusion(primitive); - break; - case NODE_TYPE_LESS_EQUAL: - return ConvertLessEqual(primitive); - break; - case NODE_TYPE_MATMUL_FUSION: - return ConvertMatMulFusion(primitive); - break; - case NODE_TYPE_MAXIMUM: - return ConvertMaximum(primitive); - break; - case NODE_TYPE_MAX_POOL_FUSION: - return ConvertMaxPoolFusion(primitive); - break; - case NODE_TYPE_MUL_FUSION: - return ConvertMulFusion(primitive); - break; - case NODE_TYPE_ONE_HOT: - return ConvertOneHot(primitive); - break; - case NODE_TYPE_PAD_FUSION: - return ConvertPadFusion(primitive); - break; - case NODE_TYPE_POW_FUSION: - return ConvertPowFusion(primitive); - break; - case NODE_TYPE_PRELU_FUSION: - return ConvertPReLUFusion(primitive); - break; - case NODE_TYPE_QUANT_DTYPE_CAST: - return ConvertQuantDTypeCast(primitive); - break; - case NODE_TYPE_REDUCE_FUSION: - return ConvertReduceFusion(primitive); - break; - case NODE_TYPE_RESHAPE: - return ConvertReshape(primitive); - break; - case NODE_TYPE_RESIZE: - return ConvertResize(primitive); - break; - case NODE_TYPE_RSQRT: - return ConvertRsqrt(primitive); - break; - case NODE_TYPE_SCALE_FUSION: - return ConvertScaleFusion(primitive); - break; - case NODE_TYPE_SHAPE: - return ConvertShape(primitive); - break; - case NODE_TYPE_SLICE_FUSION: - return ConvertSliceFusion(primitive); - break; - case NODE_TYPE_SOFTMAX: - return ConvertSoftmax(primitive); - break; - case NODE_TYPE_SPACE_TO_BATCH_ND: - return ConvertSpaceToBatchND(primitive); - break; - case NODE_TYPE_SPLIT: - return ConvertSplit(primitive); - break; - case NODE_TYPE_SQRT: - return ConvertSqrt(primitive); - break; - case NODE_TYPE_SQUARED_DIFFERENCE: - return ConvertSquaredDifference(primitive); - break; - case NODE_TYPE_SQUEEZE: - 
return ConvertSqueeze(primitive); - break; - case NODE_TYPE_STACK: - return ConvertStack(primitive); - break; - case NODE_TYPE_STRIDED_SLICE: - return ConvertStridedSlice(primitive); - break; - case NODE_TYPE_SUB_FUSION: - return ConvertSubFusion(primitive); - break; - case NODE_TYPE_TILE_FUSION: - return ConvertTileFusion(primitive); - break; - case NODE_TYPE_TOPK_FUSION: - return ConvertTopKFusion(primitive); - break; - case NODE_TYPE_TRANSPOSE: - return ConvertTranspose(primitive); - break; - case NODE_TYPE_UNSQUEEZE: - return ConvertUnsqueeze(primitive); - break; - default: - return {}; + if (convertOpMap.find(type) != convertOpMap.end()) { + return convertOpMap[type](primitive); } + LOGE("MindIR_LiteGraph_To_Model v1_0 failed, nodeType invalid, type =%d", type); + return {}; } inline std::vector MindIR_Tensor_GetQuantParams_OHOS(TensorPtr tensor) diff --git a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_0.cpp b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_0.cpp index 4849bafbd1cdb61173beab186dfb990c03e71106..0e238ff81fab575cbffe38497355d012206adb2d 100644 --- a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_0.cpp +++ b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_0.cpp @@ -878,156 +878,63 @@ std::vector ConvertUnsqueeze(PrimitivePtr primitive) return ret; } +std::unordered_map(*)(PrimitivePtr)> convertOpMap = { + {NODE_TYPE_ACTIVATION, &ConvertActivation}, + {NODE_TYPE_ADD_FUSION, &ConvertAddFusion}, + {NODE_TYPE_ARGMAX_FUSION, &ConvertArgMaxFusion}, + {NODE_TYPE_AVGPOOL_FUSION, &ConvertAvgPoolFusion}, + {NODE_TYPE_BATCH_TO_SPACE_ND, &ConvertBatchToSpaceND}, + {NODE_TYPE_BIAS_ADD, &ConvertBiasAdd}, + {NODE_TYPE_CAST, &ConvertCast}, + {NODE_TYPE_CONCAT, &ConvertConcat}, + {NODE_TYPE_CONV2D_FUSION, &ConvertConv2DFusion}, + {NODE_TYPE_CONV2D_TRANSPOSE_FUSION, &ConvertConv2dTransposeFusion}, + {NODE_TYPE_DIV_FUSION, &ConvertDivFusion}, + {NODE_TYPE_ELTWISE, &ConvertEltwise}, 
+ {NODE_TYPE_EXPAND_DIMS, &ConvertExpandDims}, + {NODE_TYPE_FILL, &ConvertFill}, + {NODE_TYPE_FULL_CONNECTION, &ConvertFullConnection}, + {NODE_TYPE_FUSED_BATCH_NORM, &ConvertFusedBatchNorm}, + {NODE_TYPE_GATHER, &ConvertGather}, + {NODE_TYPE_LAYER_NORM_FUSION, &ConvertLayerNormFusion}, + {NODE_TYPE_LESS_EQUAL, &ConvertLessEqual}, + {NODE_TYPE_MATMUL_FUSION, &ConvertMatMulFusion}, + {NODE_TYPE_MAXIMUM, &ConvertMaximum}, + {NODE_TYPE_MAX_POOL_FUSION, &ConvertMaxPoolFusion}, + {NODE_TYPE_MUL_FUSION, &ConvertMulFusion}, + {NODE_TYPE_ONE_HOT, &ConvertOneHot}, + {NODE_TYPE_PAD_FUSION, &ConvertPadFusion}, + {NODE_TYPE_POW_FUSION, &ConvertPowFusion}, + {NODE_TYPE_PRELU_FUSION, &ConvertPReLUFusion}, + {NODE_TYPE_QUANT_DTYPE_CAST, &ConvertQuantDTypeCast}, + {NODE_TYPE_REDUCE_FUSION, &ConvertReduceFusion}, + {NODE_TYPE_RESHAPE, &ConvertReshape}, + {NODE_TYPE_RESIZE, &ConvertResize}, + {NODE_TYPE_RSQRT, &ConvertRsqrt}, + {NODE_TYPE_SCALE_FUSION, &ConvertScaleFusion}, + {NODE_TYPE_SHAPE, &ConvertShape}, + {NODE_TYPE_SLICE_FUSION, &ConvertSliceFusion}, + {NODE_TYPE_SOFTMAX, &ConvertSoftmax}, + {NODE_TYPE_SPACE_TO_BATCH_ND, &ConvertSpaceToBatchND}, + {NODE_TYPE_SPLIT, &ConvertSplit}, + {NODE_TYPE_SQRT, &ConvertSqrt}, + {NODE_TYPE_SQUARED_DIFFERENCE, &ConvertSquaredDifference}, + {NODE_TYPE_SQUEEZE, &ConvertSqueeze}, + {NODE_TYPE_STACK, &ConvertStack}, + {NODE_TYPE_STRIDED_SLICE, &ConvertStridedSlice}, + {NODE_TYPE_SUB_FUSION, &ConvertSubFusion}, + {NODE_TYPE_TILE_FUSION, &ConvertTileFusion}, + {NODE_TYPE_TOPK_FUSION, &ConvertTopKFusion}, + {NODE_TYPE_TRANSPOSE, &ConvertTranspose}, + {NODE_TYPE_UNSQUEEZE, &ConvertUnsqueeze}}; + std::vector Convert(NodeType type, PrimitivePtr primitive) { - switch (type) { - case NODE_TYPE_ACTIVATION: - return ConvertActivation(primitive); - break; - case NODE_TYPE_ADD_FUSION: - return ConvertAddFusion(primitive); - break; - case NODE_TYPE_ARGMAX_FUSION: - return ConvertArgMaxFusion(primitive); - break; - case NODE_TYPE_AVGPOOL_FUSION: - return 
ConvertAvgPoolFusion(primitive); - break; - case NODE_TYPE_BATCH_TO_SPACE_ND: - return ConvertBatchToSpaceND(primitive); - break; - case NODE_TYPE_BIAS_ADD: - return ConvertBiasAdd(primitive); - break; - case NODE_TYPE_CAST: - return ConvertCast(primitive); - break; - case NODE_TYPE_CONCAT: - return ConvertConcat(primitive); - break; - case NODE_TYPE_CONV2D_FUSION: - return ConvertConv2DFusion(primitive); - break; - case NODE_TYPE_CONV2D_TRANSPOSE_FUSION: - return ConvertConv2dTransposeFusion(primitive); - break; - case NODE_TYPE_DIV_FUSION: - return ConvertDivFusion(primitive); - break; - case NODE_TYPE_ELTWISE: - return ConvertEltwise(primitive); - break; - case NODE_TYPE_EXPAND_DIMS: - return ConvertExpandDims(primitive); - break; - case NODE_TYPE_FILL: - return ConvertFill(primitive); - break; - case NODE_TYPE_FULL_CONNECTION: - return ConvertFullConnection(primitive); - break; - case NODE_TYPE_FUSED_BATCH_NORM: - return ConvertFusedBatchNorm(primitive); - break; - case NODE_TYPE_GATHER: - return ConvertGather(primitive); - break; - case NODE_TYPE_LAYER_NORM_FUSION: - return ConvertLayerNormFusion(primitive); - break; - case NODE_TYPE_LESS_EQUAL: - return ConvertLessEqual(primitive); - break; - case NODE_TYPE_MATMUL_FUSION: - return ConvertMatMulFusion(primitive); - break; - case NODE_TYPE_MAXIMUM: - return ConvertMaximum(primitive); - break; - case NODE_TYPE_MAX_POOL_FUSION: - return ConvertMaxPoolFusion(primitive); - break; - case NODE_TYPE_MUL_FUSION: - return ConvertMulFusion(primitive); - break; - case NODE_TYPE_ONE_HOT: - return ConvertOneHot(primitive); - break; - case NODE_TYPE_PAD_FUSION: - return ConvertPadFusion(primitive); - break; - case NODE_TYPE_POW_FUSION: - return ConvertPowFusion(primitive); - break; - case NODE_TYPE_PRELU_FUSION: - return ConvertPReLUFusion(primitive); - break; - case NODE_TYPE_QUANT_DTYPE_CAST: - return ConvertQuantDTypeCast(primitive); - break; - case NODE_TYPE_REDUCE_FUSION: - return ConvertReduceFusion(primitive); - 
break; - case NODE_TYPE_RESHAPE: - return ConvertReshape(primitive); - break; - case NODE_TYPE_RESIZE: - return ConvertResize(primitive); - break; - case NODE_TYPE_RSQRT: - return ConvertRsqrt(primitive); - break; - case NODE_TYPE_SCALE_FUSION: - return ConvertScaleFusion(primitive); - break; - case NODE_TYPE_SHAPE: - return ConvertShape(primitive); - break; - case NODE_TYPE_SLICE_FUSION: - return ConvertSliceFusion(primitive); - break; - case NODE_TYPE_SOFTMAX: - return ConvertSoftmax(primitive); - break; - case NODE_TYPE_SPACE_TO_BATCH_ND: - return ConvertSpaceToBatchND(primitive); - break; - case NODE_TYPE_SPLIT: - return ConvertSplit(primitive); - break; - case NODE_TYPE_SQRT: - return ConvertSqrt(primitive); - break; - case NODE_TYPE_SQUARED_DIFFERENCE: - return ConvertSquaredDifference(primitive); - break; - case NODE_TYPE_SQUEEZE: - return ConvertSqueeze(primitive); - break; - case NODE_TYPE_STACK: - return ConvertStack(primitive); - break; - case NODE_TYPE_STRIDED_SLICE: - return ConvertStridedSlice(primitive); - break; - case NODE_TYPE_SUB_FUSION: - return ConvertSubFusion(primitive); - break; - case NODE_TYPE_TILE_FUSION: - return ConvertTileFusion(primitive); - break; - case NODE_TYPE_TOPK_FUSION: - return ConvertTopKFusion(primitive); - break; - case NODE_TYPE_TRANSPOSE: - return ConvertTranspose(primitive); - break; - case NODE_TYPE_UNSQUEEZE: - return ConvertUnsqueeze(primitive); - break; - default: - return {}; + if (convertOpMap.find(type) != convertOpMap.end()) { + return convertOpMap[type](primitive); } + LOGE("MindIR_LiteGraph_To_Model v2_0 failed, nodeType invalid, type =%d", type); + return {}; } inline std::vector MindIR_Tensor_GetQuantParams_OHOS(TensorPtr tensor) diff --git a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp index f94b15976a16c882e461e434c0efa835856e347c..2f59aae69fe84859bf69c2626dd171891053162c 100644 --- 
a/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp +++ b/frameworks/native/neural_network_runtime/lite_graph_to_hdi_model_v2_1.cpp @@ -1638,288 +1638,107 @@ std::vector ConvertLogSoftmax(PrimitivePtr primitive) return ret; } +std::unordered_map(*)(PrimitivePtr)> convertOpMap = { + {NODE_TYPE_ACTIVATION, &ConvertActivation}, + {NODE_TYPE_ADD_FUSION, &ConvertAddFusion}, + {NODE_TYPE_ALL, &ConvertAll}, + {NODE_TYPE_ARGMAX_FUSION, &ConvertArgMaxFusion}, + {NODE_TYPE_ASSERT, &ConvertAssert}, + {NODE_TYPE_AVGPOOL_FUSION, &ConvertAvgPoolFusion}, + {NODE_TYPE_BATCH_TO_SPACE_ND, &ConvertBatchToSpaceND}, + {NODE_TYPE_BIAS_ADD, &ConvertBiasAdd}, + {NODE_TYPE_BROADCAST_TO, &ConvertBroadcastTo}, + {NODE_TYPE_CAST, &ConvertCast}, + {NODE_TYPE_CEIL, &ConvertCeil}, + {NODE_TYPE_CLIP, &ConvertClip}, + {NODE_TYPE_CONCAT, &ConvertConcat}, + {NODE_TYPE_CONV2D_FUSION, &ConvertConv2DFusion}, + {NODE_TYPE_CONV2D_TRANSPOSE_FUSION, &ConvertConv2dTransposeFusion}, + {NODE_TYPE_COS, &ConvertCos}, + {NODE_TYPE_CONSTANT_OF_SHAPE, &ConvertConstantOfShape}, + {NODE_TYPE_CROP, &ConvertCrop}, + {NODE_TYPE_DEPTH_TO_SPACE, &ConvertDepthToSpace}, + {NODE_TYPE_DETECTION_POST_PROCESS, &ConvertDetectionPostProcess}, + {NODE_TYPE_DIV_FUSION, &ConvertDivFusion}, + {NODE_TYPE_ELTWISE, &ConvertEltwise}, + {NODE_TYPE_EQUAL, &ConvertEqual}, + {NODE_TYPE_EXPFUSION, &ConvertExpFusion}, + {NODE_TYPE_EXPAND_DIMS, &ConvertExpandDims}, + {NODE_TYPE_FLATTEN, &ConvertFlatten}, + {NODE_TYPE_FLOOR, &ConvertFloor}, + {NODE_TYPE_FILL, &ConvertFill}, + {NODE_TYPE_FULL_CONNECTION, &ConvertFullConnection}, + {NODE_TYPE_FUSED_BATCH_NORM, &ConvertFusedBatchNorm}, + {NODE_TYPE_GATHER, &ConvertGather}, + {NODE_TYPE_GATHER_ND, &ConvertGatherNd}, + {NODE_TYPE_GREATER, &ConvertGreater}, + {NODE_TYPE_GREATER_EQUAL, &ConvertGreaterEqual}, + {NODE_TYPE_INSTANCE_NORM, &ConvertInstanceNorm}, + {NODE_TYPE_LAYER_NORM_FUSION, &ConvertLayerNormFusion}, + {NODE_TYPE_LESS, &ConvertLess}, + {NODE_TYPE_LESS_EQUAL, 
&ConvertLessEqual}, + {NODE_TYPE_LOG, &ConvertLog}, + {NODE_TYPE_LOGICAL_AND, &ConvertLogicalAnd}, + {NODE_TYPE_LOGICAL_NOT, &ConvertLogicalNot}, + {NODE_TYPE_LOGICAL_OR, &ConvertLogicalOr}, + {NODE_TYPE_LRN, &ConvertLRN}, + {NODE_TYPE_LSTM, &ConvertLSTM}, + {NODE_TYPE_L2_NORMALIZE_FUSION, &ConvertL2NormalizeFusion}, + {NODE_TYPE_MATMUL_FUSION, &ConvertMatMulFusion}, + {NODE_TYPE_MAXIMUM, &ConvertMaximum}, + {NODE_TYPE_MAX_POOL_FUSION, &ConvertMaxPoolFusion}, + {NODE_TYPE_MINIMUM, &ConvertMinimum}, + {NODE_TYPE_MOD, &ConvertMod}, + {NODE_TYPE_MUL_FUSION, &ConvertMulFusion}, + {NODE_TYPE_NEG, &ConvertNeg}, + {NODE_TYPE_NOT_EQUAL, &ConvertNotEqual}, + {NODE_TYPE_ONE_HOT, &ConvertOneHot}, + {NODE_TYPE_PAD_FUSION, &ConvertPadFusion}, + {NODE_TYPE_POW_FUSION, &ConvertPowFusion}, + {NODE_TYPE_PRELU_FUSION, &ConvertPReLUFusion}, + {NODE_TYPE_QUANT_DTYPE_CAST, &ConvertQuantDTypeCast}, + {NODE_TYPE_RANK, &ConvertRank}, + {NODE_TYPE_RANGE, &ConvertRange}, + {NODE_TYPE_RECIPROCAL, &ConvertReciprocal}, + {NODE_TYPE_REDUCE_FUSION, &ConvertReduceFusion}, + {NODE_TYPE_RESHAPE, &ConvertReshape}, + {NODE_TYPE_RESIZE, &ConvertResize}, + {NODE_TYPE_ROUND, &ConvertRound}, + {NODE_TYPE_RSQRT, &ConvertRsqrt}, + {NODE_TYPE_SCALE_FUSION, &ConvertScaleFusion}, + {NODE_TYPE_SCATTER_ND, &ConvertScatterNd}, + {NODE_TYPE_SHAPE, &ConvertShape}, + {NODE_TYPE_SIN, &ConvertSin}, + {NODE_TYPE_SLICE_FUSION, &ConvertSliceFusion}, + {NODE_TYPE_SOFTMAX, &ConvertSoftmax}, + {NODE_TYPE_SPACE_TO_BATCH_ND, &ConvertSpaceToBatchND}, + {NODE_TYPE_SPACE_TO_DEPTH, &ConvertSpaceToDepth}, + {NODE_TYPE_SPARSE_TO_DENSE, &ConvertSparseToDense}, + {NODE_TYPE_SPLIT, &ConvertSplit}, + {NODE_TYPE_SQRT, &ConvertSqrt}, + {NODE_TYPE_SQUARED_DIFFERENCE, &ConvertSquaredDifference}, + {NODE_TYPE_SQUEEZE, &ConvertSqueeze}, + {NODE_TYPE_SQUARE, &ConvertSquare}, + {NODE_TYPE_STACK, &ConvertStack}, + {NODE_TYPE_STRIDED_SLICE, &ConvertStridedSlice}, + {NODE_TYPE_SUB_FUSION, &ConvertSubFusion}, + {NODE_TYPE_TILE_FUSION, 
&ConvertTileFusion}, + {NODE_TYPE_TOPK_FUSION, &ConvertTopKFusion}, + {NODE_TYPE_TRANSPOSE, &ConvertTranspose}, + {NODE_TYPE_UNSQUEEZE, &ConvertUnsqueeze}, + {NODE_TYPE_UNSTACK, &ConvertUnstack}, + {NODE_TYPE_WHERE, &ConvertWhere}, + {NODE_TYPE_SELECT, &ConvertSelect}, + {NODE_TYPE_ERF, &ConvertErf}, + {NODE_TYPE_LOG_SOFTMAX, &ConvertLogSoftmax}}; + std::vector Convert(OHOS::HDI::Nnrt::V2_1::NodeType type, PrimitivePtr primitive) { - switch (type) { - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ACTIVATION: - return ConvertActivation(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ADD_FUSION: - return ConvertAddFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ALL: - return ConvertAll(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ARGMAX_FUSION: - return ConvertArgMaxFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ASSERT: - return ConvertAssert(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_AVGPOOL_FUSION: - return ConvertAvgPoolFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_BATCH_TO_SPACE_ND: - return ConvertBatchToSpaceND(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_BIAS_ADD: - return ConvertBiasAdd(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_BROADCAST_TO: - return ConvertBroadcastTo(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CAST: - return ConvertCast(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CEIL: - return ConvertCeil(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CLIP: - return ConvertClip(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CONCAT: - return ConvertConcat(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CONV2D_FUSION: - return ConvertConv2DFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CONV2D_TRANSPOSE_FUSION: - return ConvertConv2dTransposeFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_COS: - return 
ConvertCos(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CONSTANT_OF_SHAPE: - return ConvertConstantOfShape(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_CROP: - return ConvertCrop(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_DEPTH_TO_SPACE: - return ConvertDepthToSpace(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_DETECTION_POST_PROCESS: - return ConvertDetectionPostProcess(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_DIV_FUSION: - return ConvertDivFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ELTWISE: - return ConvertEltwise(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_EQUAL: - return ConvertEqual(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_EXPFUSION: - return ConvertExpFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_EXPAND_DIMS: - return ConvertExpandDims(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_FLATTEN: - return ConvertFlatten(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_FLOOR: - return ConvertFloor(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_FILL: - return ConvertFill(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_FULL_CONNECTION: - return ConvertFullConnection(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_FUSED_BATCH_NORM: - return ConvertFusedBatchNorm(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_GATHER: - return ConvertGather(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_GATHER_ND: - return ConvertGatherNd(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_GREATER: - return ConvertGreater(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_GREATER_EQUAL: - return ConvertGreaterEqual(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_INSTANCE_NORM: - return ConvertInstanceNorm(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LAYER_NORM_FUSION: - return 
ConvertLayerNormFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LESS: - return ConvertLess(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LESS_EQUAL: - return ConvertLessEqual(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LOG: - return ConvertLog(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LOGICAL_AND: - return ConvertLogicalAnd(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LOGICAL_NOT: - return ConvertLogicalNot(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LOGICAL_OR: - return ConvertLogicalOr(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LRN: - return ConvertLRN(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LSTM: - return ConvertLSTM(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_L2_NORMALIZE_FUSION: - return ConvertL2NormalizeFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MATMUL_FUSION: - return ConvertMatMulFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MAXIMUM: - return ConvertMaximum(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MAX_POOL_FUSION: - return ConvertMaxPoolFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MINIMUM: - return ConvertMinimum(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MOD: - return ConvertMod(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_MUL_FUSION: - return ConvertMulFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_NEG: - return ConvertNeg(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_NOT_EQUAL: - return ConvertNotEqual(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ONE_HOT: - return ConvertOneHot(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_PAD_FUSION: - return ConvertPadFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_POW_FUSION: - return ConvertPowFusion(primitive); - break; - case 
OHOS::HDI::Nnrt::V2_1::NODE_TYPE_PRELU_FUSION: - return ConvertPReLUFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_QUANT_DTYPE_CAST: - return ConvertQuantDTypeCast(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RANK: - return ConvertRank(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RANGE: - return ConvertRange(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RECIPROCAL: - return ConvertReciprocal(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_REDUCE_FUSION: - return ConvertReduceFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RESHAPE: - return ConvertReshape(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RESIZE: - return ConvertResize(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ROUND: - return ConvertRound(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_RSQRT: - return ConvertRsqrt(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SCALE_FUSION: - return ConvertScaleFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SCATTER_ND: - return ConvertScatterNd(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SHAPE: - return ConvertShape(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SIN: - return ConvertSin(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SLICE_FUSION: - return ConvertSliceFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SOFTMAX: - return ConvertSoftmax(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SPACE_TO_BATCH_ND: - return ConvertSpaceToBatchND(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SPACE_TO_DEPTH: - return ConvertSpaceToDepth(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SPARSE_TO_DENSE: - return ConvertSparseToDense(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SPLIT: - return ConvertSplit(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SQRT: - return 
ConvertSqrt(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SQUARED_DIFFERENCE: - return ConvertSquaredDifference(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SQUEEZE: - return ConvertSqueeze(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SQUARE: - return ConvertSquare(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_STACK: - return ConvertStack(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_STRIDED_SLICE: - return ConvertStridedSlice(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SUB_FUSION: - return ConvertSubFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_TILE_FUSION: - return ConvertTileFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_TOPK_FUSION: - return ConvertTopKFusion(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_TRANSPOSE: - return ConvertTranspose(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_UNSQUEEZE: - return ConvertUnsqueeze(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_UNSTACK: - return ConvertUnstack(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_WHERE: - return ConvertWhere(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_SELECT: - return ConvertSelect(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_ERF: - return ConvertErf(primitive); - break; - case OHOS::HDI::Nnrt::V2_1::NODE_TYPE_LOG_SOFTMAX: - return ConvertLogSoftmax(primitive); - break; - default: - return {}; + if (convertOpMap.find(type) != convertOpMap.end()) { + return convertOpMap[type](primitive); } + LOGE("MindIR_LiteGraph_To_Model v2_1 failed, nodeType invalid, type =%d", type); + return {}; } inline std::vector MindIR_Tensor_GetQuantParams_OHOS(TensorPtr tensor) diff --git a/frameworks/native/neural_network_runtime/ops/add_builder.cpp b/frameworks/native/neural_network_runtime/ops/add_builder.cpp index 
431733af094146199a27a7cf1b0e570dd760533e..73a6f9581640f6fcf34b43eea05867e35886ae99 100644 --- a/frameworks/native/neural_network_runtime/ops/add_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/add_builder.cpp @@ -30,7 +30,7 @@ AddBuilder::AddBuilder() {} AddBuilder::~AddBuilder() {} -OH_NN_ReturnCode AddBuilder::SetActivation(std::shared_ptr& tensor) +OH_NN_ReturnCode AddBuilder::SetActivation(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -81,13 +81,11 @@ OH_NN_ReturnCode AddBuilder::Build(const std::vector& paramsIndex, for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_ADD_ACTIVATIONTYPE: - ret = SetActivation(tensor); - break; - default: - LOGE("[Add] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Add] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/add_builder.h b/frameworks/native/neural_network_runtime/ops/add_builder.h index 1b650ead3c756be4e9cc2e3073b2715f656b8505..59f6179192bfd42b3abb2225a32d291daef8f8fc 100644 --- a/frameworks/native/neural_network_runtime/ops/add_builder.h +++ b/frameworks/native/neural_network_runtime/ops/add_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class AddBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (AddBuilder::*FuncPtr)(const std::shared_ptr&); + AddBuilder(); ~AddBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,10 +38,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetActivation(std::shared_ptr& tensor); + OH_NN_ReturnCode SetActivation(const std::shared_ptr& tensor); private: 
mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + std::unordered_map m_paramMap = { + {OH_NN_ADD_ACTIVATIONTYPE, &AddBuilder::SetActivation} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/all_builder.cpp b/frameworks/native/neural_network_runtime/ops/all_builder.cpp index 63ac02b7c4aba7ebdbcd66233cd19ff4895090a8..e2432a4ee2a89a5b11d90ba76912103eeaccaafd 100644 --- a/frameworks/native/neural_network_runtime/ops/all_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/all_builder.cpp @@ -28,7 +28,7 @@ AllBuilder::AllBuilder() {} AllBuilder::~AllBuilder() {} -OH_NN_ReturnCode AllBuilder::SetKeepDims(std::shared_ptr& tensor) +OH_NN_ReturnCode AllBuilder::SetKeepDims(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[All] The keep_dims should be type OH_NN_INT64."); @@ -75,22 +75,19 @@ OH_NN_ReturnCode AllBuilder::Build(const std::vector& paramsIndex, return ret; } - OH_NN_ReturnCode returnCode; for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_ALL_KEEP_DIMS: - returnCode = SetKeepDims(tensor); - break; - default: - LOGE("[All] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[All] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } - if (returnCode != OH_NN_SUCCESS) { + if (ret != OH_NN_SUCCESS) { LOGE("[All] Build failed, passed invalid param."); - return returnCode; + return ret; } } diff --git a/frameworks/native/neural_network_runtime/ops/all_builder.h b/frameworks/native/neural_network_runtime/ops/all_builder.h index 
e43ff1b2755df3faea740b0b44a9404485d21f6c..0aff05b3790751aed68214e2628312a2b0d34c99 100644 --- a/frameworks/native/neural_network_runtime/ops/all_builder.h +++ b/frameworks/native/neural_network_runtime/ops/all_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class AllBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (AllBuilder::*FuncPtr)(const std::shared_ptr&); + AllBuilder(); ~AllBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,10 +38,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetKeepDims(std::shared_ptr& tensor); + OH_NN_ReturnCode SetKeepDims(const std::shared_ptr& tensor); private: int64_t m_keepDims {0}; + std::unordered_map m_paramMap = { + {OH_NN_ALL_KEEP_DIMS, &AllBuilder::SetKeepDims} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/argmax_builder.cpp b/frameworks/native/neural_network_runtime/ops/argmax_builder.cpp index 5d11dceec4fd06ed523e7d1f41a4f146b76d9ee6..7adae29ed8fb6acd48fee098bcbe4d6c019654db 100644 --- a/frameworks/native/neural_network_runtime/ops/argmax_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/argmax_builder.cpp @@ -27,7 +27,7 @@ ArgMaxBuilder::ArgMaxBuilder() {} ArgMaxBuilder::~ArgMaxBuilder() {} -OH_NN_ReturnCode ArgMaxBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode ArgMaxBuilder::SetAxis(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -46,7 +46,7 @@ OH_NN_ReturnCode ArgMaxBuilder::SetAxis(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ArgMaxBuilder::SetTopK(std::shared_ptr tensor) +OH_NN_ReturnCode ArgMaxBuilder::SetTopK(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -65,7 +65,7 @@ OH_NN_ReturnCode ArgMaxBuilder::SetTopK(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ArgMaxBuilder::SetKeepdims(std::shared_ptr tensor) +OH_NN_ReturnCode 
ArgMaxBuilder::SetKeepdims(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -84,7 +84,7 @@ OH_NN_ReturnCode ArgMaxBuilder::SetKeepdims(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ArgMaxBuilder::SetOutMaxValue(std::shared_ptr tensor) +OH_NN_ReturnCode ArgMaxBuilder::SetOutMaxValue(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -134,23 +134,13 @@ OH_NN_ReturnCode ArgMaxBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { const std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_ARG_MAX_AXIS: - returnCode = SetAxis(tensor); - break; - case OH_NN_ARG_MAX_TOP_K: - returnCode = SetTopK(tensor); - break; - case OH_NN_ARG_MAX_KEEPDIMS: - returnCode = SetKeepdims(tensor); - break; - case OH_NN_ARG_MAX_OUT_MAX_VALUE: - returnCode = SetOutMaxValue(tensor); - break; - default: - LOGE("[ArgMax] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ArgMax] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[ArgMax] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/argmax_builder.h b/frameworks/native/neural_network_runtime/ops/argmax_builder.h index 997088a2f52f92a27bc08c7cd84e8713bf07e83d..9d4c7ed2fa9035194f77b8d93e58843b9141745c 100644 --- a/frameworks/native/neural_network_runtime/ops/argmax_builder.h +++ b/frameworks/native/neural_network_runtime/ops/argmax_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ArgMaxBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (ArgMaxBuilder::*FuncPtr)(const std::shared_ptr&); + ArgMaxBuilder(); ~ArgMaxBuilder() override; OH_NN_ReturnCode 
Build(const std::vector& paramsIndex, @@ -35,16 +37,22 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); - OH_NN_ReturnCode SetKeepdims(std::shared_ptr tensor); - OH_NN_ReturnCode SetTopK(std::shared_ptr tensor); - OH_NN_ReturnCode SetOutMaxValue(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetKeepdims(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetTopK(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetOutMaxValue(const std::shared_ptr& tensor); private: int64_t m_axis {-1}; int64_t m_topK {1}; bool m_keepDims {false}; bool m_outMaxValue {false}; + std::unordered_map m_paramMap = { + {OH_NN_ARG_MAX_AXIS, &ArgMaxBuilder::SetAxis}, + {OH_NN_ARG_MAX_KEEPDIMS, &ArgMaxBuilder::SetKeepdims}, + {OH_NN_ARG_MAX_TOP_K, &ArgMaxBuilder::SetTopK}, + {OH_NN_ARG_MAX_OUT_MAX_VALUE, &ArgMaxBuilder::SetOutMaxValue} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/assert_builder.cpp b/frameworks/native/neural_network_runtime/ops/assert_builder.cpp index b185e196e8f84dfd7e23d697be772d1f85cc26f4..28c39eb75592c0936e0f27d4af2d4aae1a7cadac 100644 --- a/frameworks/native/neural_network_runtime/ops/assert_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/assert_builder.cpp @@ -28,7 +28,7 @@ AssertBuilder::AssertBuilder() {} AssertBuilder::~AssertBuilder() {} -OH_NN_ReturnCode AssertBuilder::SetSummarize(std::shared_ptr& tensor) +OH_NN_ReturnCode AssertBuilder::SetSummarize(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[Assert] The summarize should be type OH_NN_INT64."); @@ -75,22 +75,19 @@ OH_NN_ReturnCode AssertBuilder::Build(const std::vector& paramsIndex, return ret; } - OH_NN_ReturnCode returnCode; for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - 
case OH_NN_ASSERT_SUMMARIZE: - returnCode = SetSummarize(tensor); - break; - default: - LOGE("[Assert] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Assert] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } - if (returnCode != OH_NN_SUCCESS) { + if (ret != OH_NN_SUCCESS) { LOGE("[Assert] Build failed, passed invalid param."); - return returnCode; + return ret; } } diff --git a/frameworks/native/neural_network_runtime/ops/assert_builder.h b/frameworks/native/neural_network_runtime/ops/assert_builder.h index 7f4189bcd51d670ac4636b71b62d6b558e380a17..5f2b408a2e92f8b1e1547e958be8025cd9dab73f 100644 --- a/frameworks/native/neural_network_runtime/ops/assert_builder.h +++ b/frameworks/native/neural_network_runtime/ops/assert_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class AssertBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (AssertBuilder::*FuncPtr)(const std::shared_ptr&); + AssertBuilder(); ~AssertBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,10 +38,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetSummarize(std::shared_ptr& tensor); + OH_NN_ReturnCode SetSummarize(const std::shared_ptr& tensor); private: int64_t m_summarize {0}; + std::unordered_map m_paramMap = { + {OH_NN_ASSERT_SUMMARIZE, &AssertBuilder::SetSummarize} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.cpp b/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.cpp index 1e6ced63367faf9ddf17ebbe7f7719d44d2effc0..84ae89f0c5c34386145fcd465d83500f00f10a73 100644 --- 
a/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.cpp @@ -29,7 +29,7 @@ BatchToSpaceNDBuilder::BatchToSpaceNDBuilder() {} BatchToSpaceNDBuilder::~BatchToSpaceNDBuilder() {} -OH_NN_ReturnCode BatchToSpaceNDBuilder::SetInputBlock(std::shared_ptr tensor) +OH_NN_ReturnCode BatchToSpaceNDBuilder::SetInputBlock(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -53,7 +53,7 @@ OH_NN_ReturnCode BatchToSpaceNDBuilder::SetInputBlock(std::shared_ptr return OH_NN_SUCCESS; } -OH_NN_ReturnCode BatchToSpaceNDBuilder::SetInputCrops(std::shared_ptr tensor) +OH_NN_ReturnCode BatchToSpaceNDBuilder::SetInputCrops(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -109,17 +109,13 @@ OH_NN_ReturnCode BatchToSpaceNDBuilder::Build(const std::vector& param for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE: - returnCode = SetInputBlock(tensor); - break; - case OH_NN_BATCH_TO_SPACE_ND_CROPS: - returnCode = SetInputCrops(tensor); - break; - default: - LOGE("[BatchToSpaceND] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[BatchToSpaceND] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[BatchToSpaceND] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.h b/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.h index 1675c9bf8cb3cb0af3a9cda6f438aa0e0162a292..c93c8ececcde783b9416c080cc4de40c91242ac1 100644 --- a/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.h 
+++ b/frameworks/native/neural_network_runtime/ops/batch_to_space_nd_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class BatchToSpaceNDBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (BatchToSpaceNDBuilder::*FuncPtr)(const std::shared_ptr&); + BatchToSpaceNDBuilder(); ~BatchToSpaceNDBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -39,12 +41,16 @@ private: OH_NN_ReturnCode SetBatchToSpaceInput(const std::vector& inputsIndex, const std::vector& outputsIndex, const std::vector>& allTensors); - OH_NN_ReturnCode SetInputBlock(std::shared_ptr tensor); - OH_NN_ReturnCode SetInputCrops(std::shared_ptr tensor); + OH_NN_ReturnCode SetInputBlock(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetInputCrops(const std::shared_ptr& tensor); private: std::vector m_blockSize; std::vector> m_crops; + std::unordered_map m_paramMap = { + {OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE, &BatchToSpaceNDBuilder::SetInputBlock}, + {OH_NN_BATCH_TO_SPACE_ND_CROPS, &BatchToSpaceNDBuilder::SetInputCrops} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/batchnorm_builder.cpp b/frameworks/native/neural_network_runtime/ops/batchnorm_builder.cpp index ee05ea483bb1067ea31bcf111645beeb4fa1eb64..d671490040f0252d6ed2831906d6a2467e26fae2 100644 --- a/frameworks/native/neural_network_runtime/ops/batchnorm_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/batchnorm_builder.cpp @@ -32,7 +32,7 @@ BatchNormBuilder::BatchNormBuilder() {} BatchNormBuilder::~BatchNormBuilder() {} -OH_NN_ReturnCode BatchNormBuilder::SetEpsilon(std::shared_ptr tensor) +OH_NN_ReturnCode BatchNormBuilder::SetEpsilon(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_FLOAT32) { @@ -82,13 +82,11 @@ OH_NN_ReturnCode BatchNormBuilder::Build(const std::vector& paramsInde for (int i : paramsIndex) { std::shared_ptr tensor = 
allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_BATCH_NORM_EPSILON: - returnCode = SetEpsilon(tensor); - break; - default: - LOGE("[BatchNorm] Parameter Type is invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[BatchNorm] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/batchnorm_builder.h b/frameworks/native/neural_network_runtime/ops/batchnorm_builder.h index ec9ed369bd076433c3bb247ecb26d79bd965e7e1..d630df7dd32bdd1fa8893e1798222a96923db109 100644 --- a/frameworks/native/neural_network_runtime/ops/batchnorm_builder.h +++ b/frameworks/native/neural_network_runtime/ops/batchnorm_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class BatchNormBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (BatchNormBuilder::*FuncPtr)(const std::shared_ptr&); + BatchNormBuilder(); ~BatchNormBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -32,10 +34,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetEpsilon(std::shared_ptr tensor); + OH_NN_ReturnCode SetEpsilon(const std::shared_ptr& tensor); private: float m_epsilon {0.0001f}; + std::unordered_map m_paramMap = { + {OH_NN_BATCH_NORM_EPSILON, &BatchNormBuilder::SetEpsilon} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.cpp b/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.cpp index 132cd8ccd78c3f6adc51ab7e26eaf6c2f6a43696..0c0e8cf055ed866f95e48dca73bb456520c8a5be 100755 --- a/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.cpp +++ 
b/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.cpp @@ -27,7 +27,7 @@ BroadcastToBuilder::BroadcastToBuilder() {} BroadcastToBuilder::~BroadcastToBuilder() {} -OH_NN_ReturnCode BroadcastToBuilder::SetShape(std::shared_ptr tensor) +OH_NN_ReturnCode BroadcastToBuilder::SetShape(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[BroadcastTo] The shape should be type OH_NN_INT64."); @@ -77,22 +77,19 @@ OH_NN_ReturnCode BroadcastToBuilder::Build(const std::vector& paramsIn return ret; } - OH_NN_ReturnCode returnCode; for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_BROADCAST_TO_SHAPE: - returnCode = SetShape(tensor); - break; - default: - LOGE("[BroadcastTo] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[BroadcastTo] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } - if (returnCode != OH_NN_SUCCESS) { + if (ret != OH_NN_SUCCESS) { LOGE("[BroadcastTo] Build failed, passed invalid param."); - return returnCode; + return ret; } } diff --git a/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.h b/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.h index 92eea4c8d3eaa8f25f7086633525921f3ef1eac7..25b7783e7e00a4fa19b0b39c8c640bf092ebc12d 100755 --- a/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.h +++ b/frameworks/native/neural_network_runtime/ops/broadcast_to_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class BroadcastToBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (BroadcastToBuilder::*FuncPtr)(const std::shared_ptr&); + BroadcastToBuilder(); ~BroadcastToBuilder() override; OH_NN_ReturnCode 
Build(const std::vector& paramsIndex, @@ -36,10 +38,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetShape(std::shared_ptr tensor); + OH_NN_ReturnCode SetShape(const std::shared_ptr& tensor); private: std::vector m_shape; + std::unordered_map m_paramMap = { + {OH_NN_BROADCAST_TO_SHAPE, &BroadcastToBuilder::SetShape} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/clip_builder.cpp b/frameworks/native/neural_network_runtime/ops/clip_builder.cpp index 2f5257d09602de57c0c5bc2fd6b5735588396267..88c6864198af8d534bbe7eb9b83dcaad3b61cdd6 100644 --- a/frameworks/native/neural_network_runtime/ops/clip_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/clip_builder.cpp @@ -28,7 +28,7 @@ ClipBuilder::ClipBuilder() {} ClipBuilder::~ClipBuilder() {} -OH_NN_ReturnCode ClipBuilder::SetMax(std::shared_ptr tensor) +OH_NN_ReturnCode ClipBuilder::SetMax(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[Clip] The max should be type OH_NN_FLOAT32."); @@ -50,7 +50,7 @@ OH_NN_ReturnCode ClipBuilder::SetMax(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ClipBuilder::SetMin(std::shared_ptr tensor) +OH_NN_ReturnCode ClipBuilder::SetMin(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[Clip] The min should be type OH_NN_FLOAT32."); @@ -97,25 +97,19 @@ OH_NN_ReturnCode ClipBuilder::Build(const std::vector& paramsIndex, return ret; } - OH_NN_ReturnCode returnCode; for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_CLIP_MAX: - returnCode = SetMax(tensor); - break; - case OH_NN_CLIP_MIN: - returnCode = SetMin(tensor); - break; - default: - LOGE("[Clip] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != 
m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Clip] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } - if (returnCode != OH_NN_SUCCESS) { + if (ret != OH_NN_SUCCESS) { LOGE("[Clip] Build failed, passed invalid param."); - return returnCode; + return ret; } } diff --git a/frameworks/native/neural_network_runtime/ops/clip_builder.h b/frameworks/native/neural_network_runtime/ops/clip_builder.h index 09c1872d42ac200710729ef7dd66dedf753ff8f2..d8696798fa6d649251626ab93568551f5eb5e4b1 100644 --- a/frameworks/native/neural_network_runtime/ops/clip_builder.h +++ b/frameworks/native/neural_network_runtime/ops/clip_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ClipBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (ClipBuilder::*FuncPtr)(const std::shared_ptr&); + ClipBuilder(); ~ClipBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,12 +38,16 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetMax(std::shared_ptr tensor); - OH_NN_ReturnCode SetMin(std::shared_ptr tensor); + OH_NN_ReturnCode SetMax(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetMin(const std::shared_ptr& tensor); private: float m_max {0.0f}; float m_min {0.0f}; + std::unordered_map m_paramMap = { + {OH_NN_CLIP_MAX, &ClipBuilder::SetMax}, + {OH_NN_CLIP_MIN, &ClipBuilder::SetMin} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/concat_builder.cpp b/frameworks/native/neural_network_runtime/ops/concat_builder.cpp index 7e08da0446ba8c52dd2392699a266612003534a3..55fed556a07420ebddbe2a4a3e5978ead4a4b79a 100644 --- a/frameworks/native/neural_network_runtime/ops/concat_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/concat_builder.cpp @@ -28,7 +28,7 @@ ConcatBuilder::ConcatBuilder() {} 
ConcatBuilder::~ConcatBuilder() {} -OH_NN_ReturnCode ConcatBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode ConcatBuilder::SetAxis(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -85,14 +85,13 @@ OH_NN_ReturnCode ConcatBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_CONCAT_AXIS: - returnCode = SetAxis(tensor); - break; - default: - LOGE("[Concat] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Concat] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[Concat] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/concat_builder.h b/frameworks/native/neural_network_runtime/ops/concat_builder.h index 7d36a0437b5037185ecf4b001c2f670ff0184796..7b5290f1c7246f7e707ef17ea75abf5a404d7b34 100644 --- a/frameworks/native/neural_network_runtime/ops/concat_builder.h +++ b/frameworks/native/neural_network_runtime/ops/concat_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ConcatBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (ConcatBuilder::*FuncPtr)(const std::shared_ptr&); + ConcatBuilder(); ~ConcatBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,12 +38,15 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); OH_NN_ReturnCode SetInputsAndOutputs(const std::vector& inputsIndex, const std::vector& outputsIndex, const std::vector>& allTensors); private: int64_t m_axis{0}; + std::unordered_map 
m_paramMap = { + {OH_NN_CONCAT_AXIS, &ConcatBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.cpp b/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.cpp index d80bc0950af2932227c1c5350166352f0616e77c..80458bc61c8d683bf173fde90af8a80ea1636ff0 100755 --- a/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.cpp @@ -28,7 +28,7 @@ ConstantOfShapeBuilder::ConstantOfShapeBuilder() {} ConstantOfShapeBuilder::~ConstantOfShapeBuilder() {} -OH_NN_ReturnCode ConstantOfShapeBuilder::SetDataType(std::shared_ptr tensor) +OH_NN_ReturnCode ConstantOfShapeBuilder::SetDataType(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[ConstantOfShape] The dataType should be type OH_NN_INT64."); @@ -50,7 +50,7 @@ OH_NN_ReturnCode ConstantOfShapeBuilder::SetDataType(std::shared_ptr t return OH_NN_SUCCESS; } -OH_NN_ReturnCode ConstantOfShapeBuilder::SetValue(std::shared_ptr tensor) +OH_NN_ReturnCode ConstantOfShapeBuilder::SetValue(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[ConstantOfShape] The value should be type OH_NN_FLOAT32."); @@ -103,16 +103,11 @@ OH_NN_ReturnCode ConstantOfShapeBuilder::Build(const std::vector& para for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE: - ret = SetDataType(tensor); - break; - case OH_NN_CONSTANT_OF_SHAPE_VALUE: - ret = SetValue(tensor); - break; - default: - LOGE("[ConstantOfShape] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ConstantOfShape] 
Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.h b/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.h index 0aff934f43144b1d31f5b77643fe663f248b14fb..3106fc76bdd7275e1aad8ca838f57c95f8400097 100755 --- a/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.h +++ b/frameworks/native/neural_network_runtime/ops/constant_of_shape_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ConstantOfShapeBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (ConstantOfShapeBuilder::*FuncPtr)(const std::shared_ptr&); + ConstantOfShapeBuilder(); ~ConstantOfShapeBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,12 +38,16 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetDataType(std::shared_ptr tensor); - OH_NN_ReturnCode SetValue(std::shared_ptr tensor); + OH_NN_ReturnCode SetDataType(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetValue(const std::shared_ptr& tensor); private: int64_t m_dataType {0}; std::vector m_value; + std::unordered_map m_paramMap = { + {OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE, &ConstantOfShapeBuilder::SetDataType}, + {OH_NN_CONSTANT_OF_SHAPE_VALUE, &ConstantOfShapeBuilder::SetValue} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/conv2d_builder.cpp b/frameworks/native/neural_network_runtime/ops/conv2d_builder.cpp index 447920472362c793ad79ddb1332bb8d5fd6a3727..0fac25495f5f12a9ade7fe74cdf9a80b060fa9e7 100644 --- a/frameworks/native/neural_network_runtime/ops/conv2d_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/conv2d_builder.cpp @@ -83,7 +83,7 @@ void Conv2DBuilder::SetKernelSize(const std::vector& inputsIndex, 
m_kernelSize.emplace_back(weightShape[KERNEL_WEIGHT_INDEX]); } -OH_NN_ReturnCode Conv2DBuilder::SetStrides(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DBuilder::SetStrides(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set Strides @@ -104,7 +104,7 @@ OH_NN_ReturnCode Conv2DBuilder::SetStrides(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DBuilder::SetDilation(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DBuilder::SetDilation(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set Dilation @@ -125,7 +125,7 @@ OH_NN_ReturnCode Conv2DBuilder::SetDilation(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DBuilder::SetPad(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DBuilder::SetPad(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -170,7 +170,7 @@ OH_NN_ReturnCode Conv2DBuilder::SetPad(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DBuilder::SetGroup(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DBuilder::SetGroup(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set Group @@ -194,7 +194,7 @@ OH_NN_ReturnCode Conv2DBuilder::SetGroup(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DBuilder::SetActavitation(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DBuilder::SetActavitation(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -252,27 +252,13 @@ OH_NN_ReturnCode Conv2DBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_CONV2D_STRIDES: - returnCode = SetStrides(tensor); - break; - case OH_NN_CONV2D_DILATION: - returnCode = SetDilation(tensor); - break; - case OH_NN_CONV2D_PAD_MODE: - case OH_NN_CONV2D_PAD: - returnCode = SetPad(tensor); - break; - case OH_NN_CONV2D_GROUP: - returnCode = SetGroup(tensor); - break; - case OH_NN_CONV2D_ACTIVATION_TYPE: - returnCode = 
SetActavitation(tensor); - break; - default: - LOGE("[Conv2D] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Conv2D] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[Conv2D] Build failed, Passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/conv2d_builder.h b/frameworks/native/neural_network_runtime/ops/conv2d_builder.h index 5b89b47adbd71896c9f49161507b9663dd33d64b..610270d89873524253d4d2f563d28001d93dee6a 100644 --- a/frameworks/native/neural_network_runtime/ops/conv2d_builder.h +++ b/frameworks/native/neural_network_runtime/ops/conv2d_builder.h @@ -25,6 +25,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class Conv2DBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (Conv2DBuilder::*FuncPtr)(const std::shared_ptr&); + Conv2DBuilder(); ~Conv2DBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -42,11 +44,11 @@ private: const std::vector>& allTensors); void SetKernelSize(const std::vector& inputsIndex, const std::vector>& allTensors); - OH_NN_ReturnCode SetStrides(std::shared_ptr tensor); - OH_NN_ReturnCode SetDilation(std::shared_ptr tensor); - OH_NN_ReturnCode SetPad(std::shared_ptr tensor); - OH_NN_ReturnCode SetGroup(std::shared_ptr tensor); - OH_NN_ReturnCode SetActavitation(std::shared_ptr tensor); + OH_NN_ReturnCode SetStrides(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetDilation(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetPad(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetGroup(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetActavitation(const std::shared_ptr& tensor); private: int64_t m_group {1}; @@ -58,6 +60,14 @@ private: std::vector m_dilation; 
mindspore::lite::PadMode m_padMode {mindspore::lite::PAD_MODE_PAD}; mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + std::unordered_map m_paramMap = { + {OH_NN_CONV2D_STRIDES, &Conv2DBuilder::SetStrides}, + {OH_NN_CONV2D_PAD, &Conv2DBuilder::SetPad}, + {OH_NN_CONV2D_DILATION, &Conv2DBuilder::SetDilation}, + {OH_NN_CONV2D_PAD_MODE, &Conv2DBuilder::SetPad}, + {OH_NN_CONV2D_ACTIVATION_TYPE, &Conv2DBuilder::SetActavitation}, + {OH_NN_CONV2D_GROUP, &Conv2DBuilder::SetGroup} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.cpp b/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.cpp index 460d65b56231186aacc4798ece6718f836e3ce00..fc090b5f9a4e5ae4101eb1e7282c91ea1a7d03a9 100644 --- a/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.cpp @@ -76,7 +76,7 @@ void Conv2DTransposeBuilder::SetKernelSize(const std::vector& inputsIn m_kernelSize.emplace_back(weightShape[KERNEL_WEIGHT_INDEX]); } -OH_NN_ReturnCode Conv2DTransposeBuilder::SetStrides(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DTransposeBuilder::SetStrides(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set Strides @@ -97,7 +97,7 @@ OH_NN_ReturnCode Conv2DTransposeBuilder::SetStrides(std::shared_ptr te return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DTransposeBuilder::SetDilation(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DTransposeBuilder::SetDilation(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set Dilation @@ -118,7 +118,7 @@ OH_NN_ReturnCode Conv2DTransposeBuilder::SetDilation(std::shared_ptr t return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DTransposeBuilder::SetPad(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DTransposeBuilder::SetPad(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ 
-163,7 +163,7 @@ OH_NN_ReturnCode Conv2DTransposeBuilder::SetPad(std::shared_ptr tensor return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DTransposeBuilder::SetGroup(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DTransposeBuilder::SetGroup(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set Group @@ -187,7 +187,7 @@ OH_NN_ReturnCode Conv2DTransposeBuilder::SetGroup(std::shared_ptr tens return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DTransposeBuilder::SetOutPadding(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DTransposeBuilder::SetOutPadding(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set outputPadding @@ -208,7 +208,7 @@ OH_NN_ReturnCode Conv2DTransposeBuilder::SetOutPadding(std::shared_ptr return OH_NN_SUCCESS; } -OH_NN_ReturnCode Conv2DTransposeBuilder::SetActivation(std::shared_ptr tensor) +OH_NN_ReturnCode Conv2DTransposeBuilder::SetActivation(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -262,29 +262,11 @@ OH_NN_ReturnCode Conv2DTransposeBuilder::Build(const std::vector& para for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; // 参数 tensor - switch (tensor->GetType()) { - case OH_NN_CONV2D_TRANSPOSE_STRIDES: - returnCode = SetStrides(tensor); - break; - case OH_NN_CONV2D_TRANSPOSE_DILATION: - returnCode = SetDilation(tensor); - break; - case OH_NN_CONV2D_TRANSPOSE_PAD_MODE: - case OH_NN_CONV2D_TRANSPOSE_PAD: - returnCode = SetPad(tensor); - break; - case OH_NN_CONV2D_TRANSPOSE_GROUP: - returnCode = SetGroup(tensor); - break; - case OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS: - returnCode = SetOutPadding(tensor); - break; - case OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE: - returnCode = SetActivation(tensor); - break; - default: - LOGE("[Conv2DTranspose] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + 
LOGE("[Conv2DTranspose] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.h b/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.h index a84c280d82a7cf390f2e39d28ce5d949def9f33b..b0dc7d8f0b48530fff3d9ba2bd71ff8a16b0080a 100644 --- a/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.h +++ b/frameworks/native/neural_network_runtime/ops/conv2d_transpose_builder.h @@ -25,6 +25,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class Conv2DTransposeBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (Conv2DTransposeBuilder::*FuncPtr)(const std::shared_ptr&); + Conv2DTransposeBuilder(); ~Conv2DTransposeBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -40,12 +42,12 @@ private: const std::vector>& allTensors); void SetKernelSize(const std::vector& inputsIndex, const std::vector>& allTensors); - OH_NN_ReturnCode SetStrides(std::shared_ptr tensor); - OH_NN_ReturnCode SetDilation(std::shared_ptr tensor); - OH_NN_ReturnCode SetPad(std::shared_ptr tensor); - OH_NN_ReturnCode SetGroup(std::shared_ptr tensor); - OH_NN_ReturnCode SetOutPadding(std::shared_ptr tensor); - OH_NN_ReturnCode SetActivation(std::shared_ptr tensor); + OH_NN_ReturnCode SetStrides(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetDilation(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetPad(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetGroup(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetOutPadding(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetActivation(const std::shared_ptr& tensor); private: int64_t m_group {1}; @@ -58,6 +60,15 @@ private: std::vector m_outputPaddings; mindspore::lite::PadMode m_padMode{mindspore::lite::PAD_MODE_PAD}; mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; 
+ std::unordered_map m_paramMap = { + {OH_NN_CONV2D_TRANSPOSE_STRIDES, &Conv2DTransposeBuilder::SetStrides}, + {OH_NN_CONV2D_TRANSPOSE_PAD, &Conv2DTransposeBuilder::SetPad}, + {OH_NN_CONV2D_TRANSPOSE_DILATION, &Conv2DTransposeBuilder::SetDilation}, + {OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS, &Conv2DTransposeBuilder::SetOutPadding}, + {OH_NN_CONV2D_TRANSPOSE_PAD_MODE, &Conv2DTransposeBuilder::SetPad}, + {OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE, &Conv2DTransposeBuilder::SetActivation}, + {OH_NN_CONV2D_TRANSPOSE_GROUP, &Conv2DTransposeBuilder::SetGroup} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/crop_builder.cpp b/frameworks/native/neural_network_runtime/ops/crop_builder.cpp index 0baa083f24c3e7aca454d446c8b910e14bf44657..fe971d7cc77557f6f86f67c0406107c4de27f98e 100644 --- a/frameworks/native/neural_network_runtime/ops/crop_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/crop_builder.cpp @@ -31,7 +31,7 @@ CropBuilder::CropBuilder() {} CropBuilder::~CropBuilder() {} -OH_NN_ReturnCode CropBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode CropBuilder::SetAxis(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[Crop] The axis should be type OH_NN_INT64."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode CropBuilder::SetAxis(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode CropBuilder::SetOffset(std::shared_ptr tensor) +OH_NN_ReturnCode CropBuilder::SetOffset(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[Crop] The offset should be type OH_NN_INT64."); @@ -103,25 +103,19 @@ OH_NN_ReturnCode CropBuilder::Build(const std::vector& paramsIndex, m_inputsIndex = inputsIndex; m_outputsIndex = outputsIndex; - OH_NN_ReturnCode returnCode; for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_CROP_AXIS: - returnCode = 
SetAxis(tensor); - break; - case OH_NN_CROP_OFFSET: - returnCode = SetOffset(tensor); - break; - default: - LOGE("[Crop] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Crop] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } - if (returnCode != OH_NN_SUCCESS) { + if (ret != OH_NN_SUCCESS) { LOGE("[Crop] Build failed, passed invalid param."); - return returnCode; + return ret; } } diff --git a/frameworks/native/neural_network_runtime/ops/crop_builder.h b/frameworks/native/neural_network_runtime/ops/crop_builder.h index feee26b645bb6bcd3311a3a4ccc41a5abab7ce7a..61ca4838eb9ebf4634f91344d9365b2c8f2d5ca0 100644 --- a/frameworks/native/neural_network_runtime/ops/crop_builder.h +++ b/frameworks/native/neural_network_runtime/ops/crop_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class CropBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (CropBuilder::*FuncPtr)(const std::shared_ptr&); + CropBuilder(); ~CropBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -33,12 +35,16 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); - OH_NN_ReturnCode SetOffset(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetOffset(const std::shared_ptr& tensor); private: int64_t m_axis {0}; std::vector m_offset; + std::unordered_map m_paramMap = { + {OH_NN_CROP_AXIS, &CropBuilder::SetAxis}, + {OH_NN_CROP_OFFSET, &CropBuilder::SetOffset} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.cpp b/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.cpp index 
561caefad705ba8163788e2975cf9083507b57ac..c09be38594c992479d110bb191bafb58dcf811d2 100755 --- a/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.cpp @@ -32,7 +32,7 @@ DepthToSpaceBuilder::DepthToSpaceBuilder() {} DepthToSpaceBuilder::~DepthToSpaceBuilder() {} -OH_NN_ReturnCode DepthToSpaceBuilder::SetBlockSize(std::shared_ptr tensor) +OH_NN_ReturnCode DepthToSpaceBuilder::SetBlockSize(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[DepthToSpace] The blockSize should be type OH_NN_INT64."); @@ -54,7 +54,7 @@ OH_NN_ReturnCode DepthToSpaceBuilder::SetBlockSize(std::shared_ptr ten return OH_NN_SUCCESS; } -OH_NN_ReturnCode DepthToSpaceBuilder::SetMode(std::shared_ptr tensor) +OH_NN_ReturnCode DepthToSpaceBuilder::SetMode(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT32) { LOGE("[DepthToSpace] The mode should be type OH_NN_INT32."); @@ -108,16 +108,11 @@ OH_NN_ReturnCode DepthToSpaceBuilder::Build(const std::vector& paramsI for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE: - ret = SetBlockSize(tensor); - break; - case OH_NN_DEPTH_TO_SPACE_MODE: - ret = SetMode(tensor); - break; - default: - LOGE("[DepthToSpace] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[DepthToSpace] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.h b/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.h index 
5ecc071488323f1e34aed4c627a570e98b087d74..e0d8980e9f0c8169d73a79d7d934bc4e9a3923bb 100755 --- a/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.h +++ b/frameworks/native/neural_network_runtime/ops/depth_to_space_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class DepthToSpaceBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (DepthToSpaceBuilder::*FuncPtr)(const std::shared_ptr&); + DepthToSpaceBuilder(); ~DepthToSpaceBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,12 +38,16 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetBlockSize(std::shared_ptr tensor); - OH_NN_ReturnCode SetMode(std::shared_ptr tensor); + OH_NN_ReturnCode SetBlockSize(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetMode(const std::shared_ptr& tensor); private: int64_t m_blockSize {0}; std::string m_mode; + std::unordered_map m_paramMap = { + {OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE, &DepthToSpaceBuilder::SetBlockSize}, + {OH_NN_DEPTH_TO_SPACE_MODE, &DepthToSpaceBuilder::SetMode} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.cpp b/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.cpp index 93db5eb582d4a5f9bb984b9f4b8745f693f9fb5d..3e50f1ddd8dc9f3328c7c3d10c19e20e601f834c 100644 --- a/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.cpp @@ -41,7 +41,7 @@ DepthwiseConv2DNativeBuilder::DepthwiseConv2DNativeBuilder() {} DepthwiseConv2DNativeBuilder::~DepthwiseConv2DNativeBuilder() {} -OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetIsPadMode(std::shared_ptr tensor, +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetIsPadMode(const std::shared_ptr& tensor, bool &isPadMode) { if (tensor->GetElementCount() == 
PAD_MODE_SIZE) { @@ -55,7 +55,7 @@ OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetIsPadMode(std::shared_ptr tensor) +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetActivation(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set ActivationType @@ -101,7 +101,7 @@ OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetKernelSize(const std::vector tensor) +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetStrides(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { @@ -120,7 +120,7 @@ OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetStrides(std::shared_ptr tensor) +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetDilation(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { @@ -140,8 +140,7 @@ OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetDilation(std::shared_ptr tensor) +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetPadModeOrPaddings(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -233,24 +232,13 @@ OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::Build(const std::vector for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; // 参数 tensor - switch (tensor->GetType()) { - case OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES: - ret = SetStrides(tensor); - break; - case OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION: - ret = SetDilation(tensor); - break; - case OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE: - case OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD: - ret = SetPadModeOrPaddings(tensor); - break; - case OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE: - ret = SetActivation(tensor); - break; - default: - LOGE("[DepthwiseConv2DNative] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[DepthwiseConv2DNative] Build failed, param invalid, type=%d", 
tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (ret != OH_NN_SUCCESS) { LOGE("[DepthwiseConv2DNative] Build failed, passed invalid param."); return ret; diff --git a/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.h b/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.h index df8879d5ea633f094cfc346f6e6326e0eff7be83..ce97986a0dd483977e40d36dc782fc9fe85478b9 100644 --- a/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.h +++ b/frameworks/native/neural_network_runtime/ops/depthwise_conv2d_native_builder.h @@ -25,6 +25,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class DepthwiseConv2DNativeBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (DepthwiseConv2DNativeBuilder::*FuncPtr)(const std::shared_ptr&); + DepthwiseConv2DNativeBuilder(); ~DepthwiseConv2DNativeBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,14 +38,14 @@ public: private: OH_NN_ReturnCode SetInputAndOutput(const std::vector& inputsIndex, const std::vector& outputsIndex, const std::vector>& allTensors); - OH_NN_ReturnCode SetIsPadMode(std::shared_ptr tensor, + OH_NN_ReturnCode SetIsPadMode(const std::shared_ptr& tensor, bool &isPadMode); - OH_NN_ReturnCode SetPadModeOrPaddings(std::shared_ptr tensor); + OH_NN_ReturnCode SetPadModeOrPaddings(const std::shared_ptr& tensor); OH_NN_ReturnCode SetKernelSize(const std::vector& inputsIndex, const std::vector>& allTensors); - OH_NN_ReturnCode SetDilation(std::shared_ptr tensor); - OH_NN_ReturnCode SetStrides(std::shared_ptr tensor); - OH_NN_ReturnCode SetActivation(std::shared_ptr tensor); + OH_NN_ReturnCode SetDilation(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetStrides(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetActivation(const std::shared_ptr& tensor); private: int64_t m_inChannel{0}; @@ -54,6 +56,13 @@ private: std::vector m_dilation; mindspore::lite::PadMode 
m_padMode{mindspore::lite::PAD_MODE_PAD}; mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + std::unordered_map m_paramMap = { + {OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES, &DepthwiseConv2DNativeBuilder::SetStrides}, + {OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD, &DepthwiseConv2DNativeBuilder::SetPadModeOrPaddings}, + {OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION, &DepthwiseConv2DNativeBuilder::SetDilation}, + {OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE, &DepthwiseConv2DNativeBuilder::SetPadModeOrPaddings}, + {OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE, &DepthwiseConv2DNativeBuilder::SetActivation} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.cpp b/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.cpp index 627d49b7ccea8a5d7d85a9bc472e28eed82d3515..a710d4b3602c19686407267f8af7e52392577ecd 100644 --- a/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.cpp @@ -33,7 +33,7 @@ DetectionPostProcessBuilder::DetectionPostProcessBuilder() {} DetectionPostProcessBuilder::~DetectionPostProcessBuilder() {} -OH_NN_ReturnCode DetectionPostProcessBuilder::SetInputSize(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetInputSize(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[DetectionPostProcess] The inputSize should be type OH_NN_INT64."); @@ -55,7 +55,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetInputSize(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetScale(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[DetectionPostProcess] The scale should be type OH_NN_FLOAT32."); @@ -81,7 +81,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetScale(std::shared_ptr return OH_NN_SUCCESS; } 
-OH_NN_ReturnCode DetectionPostProcessBuilder::SetNmsIoUThreshold(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetNmsIoUThreshold(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[DetectionPostProcess] The nmsIoUThreshold should be type OH_NN_FLOAT32."); @@ -103,7 +103,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetNmsIoUThreshold(std::shared_ptr return OH_NN_SUCCESS; } -OH_NN_ReturnCode DetectionPostProcessBuilder::SetNmsScoreThreshold(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetNmsScoreThreshold(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[DetectionPostProcess] The scoreThreshold should be type OH_NN_FLOAT32."); @@ -125,7 +125,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetNmsScoreThreshold(std::shared_p return OH_NN_SUCCESS; } -OH_NN_ReturnCode DetectionPostProcessBuilder::SetMaxDetections(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetMaxDetections(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[DetectionPostProcess] The maxDetections should be type OH_NN_INT64."); @@ -147,7 +147,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetMaxDetections(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetDetectionsPerClass(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[DetectionPostProcess] The detectionsPerClass should be type OH_NN_INT64."); @@ -169,7 +169,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetDetectionsPerClass(std::shared_ return OH_NN_SUCCESS; } -OH_NN_ReturnCode DetectionPostProcessBuilder::SetMaxClassesPerDetection(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetMaxClassesPerDetection(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[DetectionPostProcess] The maxClassesPerDetection should be type OH_NN_INT64."); @@ -191,7 
+191,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetMaxClassesPerDetection(std::sha return OH_NN_SUCCESS; } -OH_NN_ReturnCode DetectionPostProcessBuilder::SetNumClasses(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetNumClasses(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[DetectionPostProcess] The numClasses should be type OH_NN_INT64."); @@ -213,7 +213,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetNumClasses(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetUseRegularNms(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[DetectionPostProcess] The useRegularNms should be type OH_NN_BOOL."); @@ -235,7 +235,7 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::SetUseRegularNms(std::shared_ptr tensor) +OH_NN_ReturnCode DetectionPostProcessBuilder::SetOutQuantized(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[DetectionPostProcess] The outQuantized should be type OH_NN_BOOL."); @@ -287,40 +287,11 @@ OH_NN_ReturnCode DetectionPostProcessBuilder::Build(const std::vector& for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE: - ret = SetInputSize(tensor); - break; - case OH_NN_DETECTION_POST_PROCESS_SCALE: - ret = SetScale(tensor); - break; - case OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD: - ret = SetNmsIoUThreshold(tensor); - break; - case OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD: - ret = SetNmsScoreThreshold(tensor); - break; - case OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS: - ret = SetMaxDetections(tensor); - break; - case OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS: - ret = SetDetectionsPerClass(tensor); - break; - case OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION: - ret = SetMaxClassesPerDetection(tensor); - break; - case 
OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES: - ret = SetNumClasses(tensor); - break; - case OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS: - ret = SetUseRegularNms(tensor); - break; - case OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED: - ret = SetOutQuantized(tensor); - break; - default: - LOGE("[DetectionPostProcess] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[DetectionPostProcess] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.h b/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.h index c1bb8db387dc5f9408c585191592cf54622e14e0..505a48f45f429812e7be519ce656422bea380136 100644 --- a/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.h +++ b/frameworks/native/neural_network_runtime/ops/detection_post_process_builder.h @@ -23,6 +23,9 @@ namespace NeuralNetworkRuntime { namespace Ops { class DetectionPostProcessBuilder : public OpsBuilder { public: + typedef DetectionPostProcessBuilder DPPBuilder; + typedef OH_NN_ReturnCode (DPPBuilder::*FuncPtr)(const std::shared_ptr&); + DetectionPostProcessBuilder(); ~DetectionPostProcessBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -33,16 +36,16 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetInputSize(std::shared_ptr tensor); - OH_NN_ReturnCode SetScale(std::shared_ptr tensor); - OH_NN_ReturnCode SetNmsIoUThreshold(std::shared_ptr tensor); - OH_NN_ReturnCode SetNmsScoreThreshold(std::shared_ptr tensor); - OH_NN_ReturnCode SetMaxDetections(std::shared_ptr tensor); - OH_NN_ReturnCode SetDetectionsPerClass(std::shared_ptr tensor); - OH_NN_ReturnCode 
SetMaxClassesPerDetection(std::shared_ptr tensor); - OH_NN_ReturnCode SetNumClasses(std::shared_ptr tensor); - OH_NN_ReturnCode SetUseRegularNms(std::shared_ptr tensor); - OH_NN_ReturnCode SetOutQuantized(std::shared_ptr tensor); + OH_NN_ReturnCode SetInputSize(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetScale(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetNmsIoUThreshold(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetNmsScoreThreshold(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetMaxDetections(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetDetectionsPerClass(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetMaxClassesPerDetection(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetNumClasses(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetUseRegularNms(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetOutQuantized(const std::shared_ptr& tensor); private: int64_t m_inputSize {0}; @@ -55,6 +58,18 @@ private: int64_t m_numClasses {0}; bool m_useRegularNms {false}; bool m_outQuantized {false}; + std::unordered_map m_paramMap = { + {OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE, &DPPBuilder::SetInputSize}, + {OH_NN_DETECTION_POST_PROCESS_SCALE, &DPPBuilder::SetScale}, + {OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD, &DPPBuilder::SetNmsIoUThreshold}, + {OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD, &DPPBuilder::SetNmsScoreThreshold}, + {OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS, &DPPBuilder::SetMaxDetections}, + {OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS, &DPPBuilder::SetDetectionsPerClass}, + {OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION, &DPPBuilder::SetMaxClassesPerDetection}, + {OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES, &DPPBuilder::SetNumClasses}, + {OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS, &DPPBuilder::SetUseRegularNms}, + {OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED, &DPPBuilder::SetOutQuantized} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git 
a/frameworks/native/neural_network_runtime/ops/div_builder.cpp b/frameworks/native/neural_network_runtime/ops/div_builder.cpp index 89227b01a0ff4b0d08756afbfc40e1197f7b9a28..e57c4a32e542969025f9203bbbfda992846d0f12 100644 --- a/frameworks/native/neural_network_runtime/ops/div_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/div_builder.cpp @@ -31,7 +31,7 @@ DivBuilder::DivBuilder() {} DivBuilder::~DivBuilder() {} -OH_NN_ReturnCode DivBuilder::SetActicationType(std::shared_ptr tensor) +OH_NN_ReturnCode DivBuilder::SetActicationType(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -87,14 +87,13 @@ OH_NN_ReturnCode DivBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_DIV_ACTIVATIONTYPE: - returnCode = SetActicationType(tensor); - break; - default: - LOGE("[Div] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Div] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[Div] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/div_builder.h b/frameworks/native/neural_network_runtime/ops/div_builder.h index 1c7daaf46e6217b3c3d09e3566471d2b0806d0ec..95767153faabbee65a2ebcfc39fd543fc4640296 100644 --- a/frameworks/native/neural_network_runtime/ops/div_builder.h +++ b/frameworks/native/neural_network_runtime/ops/div_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class DivBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (DivBuilder::*FuncPtr)(const std::shared_ptr&); + DivBuilder(); ~DivBuilder() override; OH_NN_ReturnCode Build(const std::vector& 
paramsIndex, @@ -36,10 +38,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetActicationType(std::shared_ptr tensor); + OH_NN_ReturnCode SetActicationType(const std::shared_ptr& tensor); private: mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + std::unordered_map m_paramMap = { + {OH_NN_DIV_ACTIVATIONTYPE, &DivBuilder::SetActicationType} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/eltwise_builder.cpp b/frameworks/native/neural_network_runtime/ops/eltwise_builder.cpp index 8a77b3db211a00b4466a3cbeb2da35baf0daaefe..7e8d40a3e82896067510f731fc4c81a7fb7070b1 100644 --- a/frameworks/native/neural_network_runtime/ops/eltwise_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/eltwise_builder.cpp @@ -28,7 +28,7 @@ EltwiseBuilder::EltwiseBuilder() {} EltwiseBuilder::~EltwiseBuilder() {} -OH_NN_ReturnCode EltwiseBuilder::SetMode(std::shared_ptr tensor) +OH_NN_ReturnCode EltwiseBuilder::SetMode(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -86,14 +86,13 @@ OH_NN_ReturnCode EltwiseBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_ELTWISE_MODE: - returnCode = SetMode(tensor); - break; - default: - LOGE("[Eltwise] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Eltwise] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[Eltwise] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/eltwise_builder.h 
b/frameworks/native/neural_network_runtime/ops/eltwise_builder.h index 78d4dc55e0a09af0200a7a56ca887fc7f8e9c4e3..9299b8aba1ddbc400de37a328735d30a2febb3f1 100644 --- a/frameworks/native/neural_network_runtime/ops/eltwise_builder.h +++ b/frameworks/native/neural_network_runtime/ops/eltwise_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class EltwiseBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (EltwiseBuilder::*FuncPtr)(const std::shared_ptr&); + EltwiseBuilder(); ~EltwiseBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,10 +38,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetMode(std::shared_ptr tensor); + OH_NN_ReturnCode SetMode(const std::shared_ptr& tensor); private: mindspore::lite::EltwiseMode m_mode {mindspore::lite::ELTWISE_MODE_PROD}; + std::unordered_map m_paramMap = { + {OH_NN_ELTWISE_MODE, &EltwiseBuilder::SetMode} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/exp_builder.cpp b/frameworks/native/neural_network_runtime/ops/exp_builder.cpp index 4afb105f5bb7e9f621ea1453525e7d6c45e62546..2350380401eab01cf21f46658c95a1070cb37589 100755 --- a/frameworks/native/neural_network_runtime/ops/exp_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/exp_builder.cpp @@ -28,7 +28,7 @@ ExpBuilder::ExpBuilder() {} ExpBuilder::~ExpBuilder() {} -OH_NN_ReturnCode ExpBuilder::SetBase(std::shared_ptr tensor) +OH_NN_ReturnCode ExpBuilder::SetBase(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[Exp] The base should be type OH_NN_FLOAT32."); @@ -50,7 +50,7 @@ OH_NN_ReturnCode ExpBuilder::SetBase(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ExpBuilder::SetScale(std::shared_ptr tensor) +OH_NN_ReturnCode ExpBuilder::SetScale(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[Exp] The 
scale should be type OH_NN_FLOAT32."); @@ -72,7 +72,7 @@ OH_NN_ReturnCode ExpBuilder::SetScale(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ExpBuilder::SetShift(std::shared_ptr tensor) +OH_NN_ReturnCode ExpBuilder::SetShift(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[Exp] The shift should be type OH_NN_FLOAT32."); @@ -122,19 +122,11 @@ OH_NN_ReturnCode ExpBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_EXP_BASE: - ret = SetBase(tensor); - break; - case OH_NN_EXP_SCALE: - ret = SetScale(tensor); - break; - case OH_NN_EXP_SHIFT: - ret = SetShift(tensor); - break; - default: - LOGE("[Exp] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Exp] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/exp_builder.h b/frameworks/native/neural_network_runtime/ops/exp_builder.h index edacbfc14375c0c9732886c7eef544196bb0e5db..9f2b64e9a5520634bc162af6b2b51282cba5e1a5 100755 --- a/frameworks/native/neural_network_runtime/ops/exp_builder.h +++ b/frameworks/native/neural_network_runtime/ops/exp_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ExpBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (ExpBuilder::*FuncPtr)(const std::shared_ptr&); + ExpBuilder(); ~ExpBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,14 +38,19 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetBase(std::shared_ptr tensor); - OH_NN_ReturnCode SetScale(std::shared_ptr tensor); - 
OH_NN_ReturnCode SetShift(std::shared_ptr tensor); + OH_NN_ReturnCode SetBase(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetScale(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetShift(const std::shared_ptr& tensor); private: float m_base {-1.0f}; float m_scale {1.0f}; float m_shift {0.0f}; + std::unordered_map m_paramMap = { + {OH_NN_EXP_BASE, &ExpBuilder::SetBase}, + {OH_NN_EXP_SCALE, &ExpBuilder::SetScale}, + {OH_NN_EXP_SHIFT, &ExpBuilder::SetShift} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/flatten_builder.cpp b/frameworks/native/neural_network_runtime/ops/flatten_builder.cpp index f9b16c108e7660dd2d8c79e6f0ce5f529105a070..ea58861f66614b33241ba484955991f244dd92ba 100755 --- a/frameworks/native/neural_network_runtime/ops/flatten_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/flatten_builder.cpp @@ -28,7 +28,7 @@ FlattenBuilder::FlattenBuilder() {} FlattenBuilder::~FlattenBuilder() {} -OH_NN_ReturnCode FlattenBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode FlattenBuilder::SetAxis(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[Flatten] The axis should be type OH_NN_INT64."); @@ -78,13 +78,11 @@ OH_NN_ReturnCode FlattenBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_FLATTEN_AXIS: - ret = SetAxis(tensor); - break; - default: - LOGE("[Flatten] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Flatten] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git 
a/frameworks/native/neural_network_runtime/ops/flatten_builder.h b/frameworks/native/neural_network_runtime/ops/flatten_builder.h index 98916b78935865647a4c2e2f024f811b8e539d0f..57068acfa4068457b4bd020c5fd355927bb6a6cd 100755 --- a/frameworks/native/neural_network_runtime/ops/flatten_builder.h +++ b/frameworks/native/neural_network_runtime/ops/flatten_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class FlattenBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (FlattenBuilder::*FuncPtr)(const std::shared_ptr&); + FlattenBuilder(); ~FlattenBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,10 +38,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); private: int64_t m_axis {1}; + std::unordered_map m_paramMap = { + {OH_NN_FLATTEN_AXIS, &FlattenBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/fullconnection_builder.cpp b/frameworks/native/neural_network_runtime/ops/fullconnection_builder.cpp index 3219a645761e14cebcb4209da221fc0728c0f89f..2ac5602285940f6f74f99124943a1b7dc1276b51 100644 --- a/frameworks/native/neural_network_runtime/ops/fullconnection_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/fullconnection_builder.cpp @@ -52,7 +52,7 @@ OH_NN_ReturnCode FullConnectionBuilder::SetFullConnectionInput(const std::vector return OH_NN_SUCCESS; } -OH_NN_ReturnCode FullConnectionBuilder::SetHasBias(std::shared_ptr tensor) +OH_NN_ReturnCode FullConnectionBuilder::SetHasBias(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[FullConnection] The hasBias should be type OH_NN_BOOL."); @@ -74,7 +74,7 @@ OH_NN_ReturnCode FullConnectionBuilder::SetHasBias(std::shared_ptr ten return OH_NN_SUCCESS; } -OH_NN_ReturnCode 
FullConnectionBuilder::SetUseAxis(std::shared_ptr tensor) +OH_NN_ReturnCode FullConnectionBuilder::SetUseAxis(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[FullConnection] The useAxis should be type OH_NN_BOOL."); @@ -103,7 +103,7 @@ OH_NN_ReturnCode FullConnectionBuilder::SetUseAxis(std::shared_ptr ten return OH_NN_SUCCESS; } -OH_NN_ReturnCode FullConnectionBuilder::SetFullConnectionActivation(std::shared_ptr tensor) +OH_NN_ReturnCode FullConnectionBuilder::SetFullConnectionActivation(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set Activation @@ -133,7 +133,7 @@ OH_NN_ReturnCode FullConnectionBuilder::SetFullConnectionActivation(std::shared_ return OH_NN_SUCCESS; } -OH_NN_ReturnCode FullConnectionBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode FullConnectionBuilder::SetAxis(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -189,23 +189,13 @@ OH_NN_ReturnCode FullConnectionBuilder::Build(const std::vector& param for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; // 参数 tensor - switch (tensor->GetType()) { - case OH_NN_FULL_CONNECTION_AXIS: - returnCode = SetAxis(tensor); - break; - case OH_NN_FULL_CONNECTION_HAS_BIAS: - returnCode = SetHasBias(tensor); - break; - case OH_NN_FULL_CONNECTION_USE_AXIS: - returnCode = SetUseAxis(tensor); - break; - case OH_NN_FULL_CONNECTION_ACTIVATIONTYPE: - returnCode = SetFullConnectionActivation(tensor); - break; - default: - LOGE("[FullConnection] Build failed, param invalid, type = %{public}d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[FullConnection] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[FullConnection] Build failed, passed invalid param."); return returnCode; diff 
--git a/frameworks/native/neural_network_runtime/ops/fullconnection_builder.h b/frameworks/native/neural_network_runtime/ops/fullconnection_builder.h index bdc164858941cc980e2ea7d19fedd94321a1ddcc..c7cbc3916948425b7e8449a8898690e993fdb561 100644 --- a/frameworks/native/neural_network_runtime/ops/fullconnection_builder.h +++ b/frameworks/native/neural_network_runtime/ops/fullconnection_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class FullConnectionBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (FullConnectionBuilder::*FuncPtr)(const std::shared_ptr&); + FullConnectionBuilder(); ~FullConnectionBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -39,16 +41,22 @@ private: OH_NN_ReturnCode SetFullConnectionInput(const std::vector& inputsIndex, const std::vector& outputsIndex, const std::vector>& allTensors); - OH_NN_ReturnCode SetHasBias(std::shared_ptr tensor); - OH_NN_ReturnCode SetUseAxis(std::shared_ptr tensor); - OH_NN_ReturnCode SetFullConnectionActivation(std::shared_ptr tensor); - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetHasBias(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetUseAxis(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetFullConnectionActivation(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); private: bool m_hasBias {false}; bool m_useAxis {false}; int64_t m_axis {0}; mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + std::unordered_map m_paramMap = { + {OH_NN_FULL_CONNECTION_ACTIVATIONTYPE, &FullConnectionBuilder::SetFullConnectionActivation}, + {OH_NN_FULL_CONNECTION_HAS_BIAS, &FullConnectionBuilder::SetHasBias}, + {OH_NN_FULL_CONNECTION_USE_AXIS, &FullConnectionBuilder::SetUseAxis}, + {OH_NN_FULL_CONNECTION_AXIS, &FullConnectionBuilder::SetAxis} + }; bool m_axisIsSet {false}; bool m_useAxisIsSet {false}; diff --git 
a/frameworks/native/neural_network_runtime/ops/gelu_builder.cpp b/frameworks/native/neural_network_runtime/ops/gelu_builder.cpp index a610dd6cdd47e33135052fd00293ecc4d755573f..34b1eae6af9f6387780ce11d1ed6236a3ff19f70 100644 --- a/frameworks/native/neural_network_runtime/ops/gelu_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/gelu_builder.cpp @@ -31,7 +31,7 @@ GeluBuilder::GeluBuilder() {} GeluBuilder::~GeluBuilder() {} -OH_NN_ReturnCode GeluBuilder::SetApproximate(std::shared_ptr tensor) +OH_NN_ReturnCode GeluBuilder::SetApproximate(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[GeLU] The approximate should be type OH_NN_BOOL."); @@ -78,14 +78,13 @@ OH_NN_ReturnCode GeluBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_GELU_APPROXIMATE: - returnCode = SetApproximate(tensor); - break; - default: - LOGE("[Gelu] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Gelu] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[Gelu] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/gelu_builder.h b/frameworks/native/neural_network_runtime/ops/gelu_builder.h index 0a590dcc99fa04844f6b8c1c8cf223cde506b3c9..920341f4d99a21a75fcf403750d7769736361562 100644 --- a/frameworks/native/neural_network_runtime/ops/gelu_builder.h +++ b/frameworks/native/neural_network_runtime/ops/gelu_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class GeluBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode 
(GeluBuilder::*FuncPtr)(const std::shared_ptr&); + GeluBuilder(); ~GeluBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -32,10 +34,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetApproximate(std::shared_ptr tensor); + OH_NN_ReturnCode SetApproximate(const std::shared_ptr& tensor); private: bool m_approximate {false}; + std::unordered_map m_paramMap = { + {OH_NN_GELU_APPROXIMATE, &GeluBuilder::SetApproximate} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/instance_norm_builder.cpp b/frameworks/native/neural_network_runtime/ops/instance_norm_builder.cpp index c6a6e43b5a381b4ec56376cdf55aa907f588e1c7..218eb0eeea222af6759b8f832a201e74deb001ae 100755 --- a/frameworks/native/neural_network_runtime/ops/instance_norm_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/instance_norm_builder.cpp @@ -28,7 +28,7 @@ InstanceNormBuilder::InstanceNormBuilder() {} InstanceNormBuilder::~InstanceNormBuilder() {} -OH_NN_ReturnCode InstanceNormBuilder::SetEpsilon(std::shared_ptr tensor) +OH_NN_ReturnCode InstanceNormBuilder::SetEpsilon(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[InstanceNorm] The epsilon should be type OH_NN_FLOAT32."); @@ -78,13 +78,11 @@ OH_NN_ReturnCode InstanceNormBuilder::Build(const std::vector& paramsI for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_INSTANCE_NORM_EPSILON: - ret = SetEpsilon(tensor); - break; - default: - LOGE("[InstanceNorm] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[InstanceNorm] Build failed, param invalid, type=%d", tensor->GetType()); + return 
OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/instance_norm_builder.h b/frameworks/native/neural_network_runtime/ops/instance_norm_builder.h index c811684468644a2681737faf0c5d203313b882c0..c12629b1d73e335be1ce87ed2003c7d4d1680840 100755 --- a/frameworks/native/neural_network_runtime/ops/instance_norm_builder.h +++ b/frameworks/native/neural_network_runtime/ops/instance_norm_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class InstanceNormBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (InstanceNormBuilder::*FuncPtr)(const std::shared_ptr&); + InstanceNormBuilder(); ~InstanceNormBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,10 +38,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetEpsilon(std::shared_ptr tensor); + OH_NN_ReturnCode SetEpsilon(const std::shared_ptr& tensor); private: float m_epsilon {0.0f}; + std::unordered_map m_paramMap = { + {OH_NN_INSTANCE_NORM_EPSILON, &InstanceNormBuilder::SetEpsilon} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.cpp b/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.cpp index 24ce736dc179393eb3003dd66b67958e56553008..eddad6c6d8acc8e8bb34a784044f61d1068d82bb 100644 --- a/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.cpp @@ -32,7 +32,7 @@ L2NormalizeBuilder::L2NormalizeBuilder() {} L2NormalizeBuilder::~L2NormalizeBuilder() {} -OH_NN_ReturnCode L2NormalizeBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode L2NormalizeBuilder::SetAxis(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[L2Normalize] The axis should be type OH_NN_INT64."); @@ -58,7 +58,7 @@ OH_NN_ReturnCode 
L2NormalizeBuilder::SetAxis(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode L2NormalizeBuilder::SetEpsilon(std::shared_ptr tensor) +OH_NN_ReturnCode L2NormalizeBuilder::SetEpsilon(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[L2Normalize] The epsilon should be type OH_NN_FLOAT32."); @@ -80,7 +80,7 @@ OH_NN_ReturnCode L2NormalizeBuilder::SetEpsilon(std::shared_ptr tensor return OH_NN_SUCCESS; } -OH_NN_ReturnCode L2NormalizeBuilder::SetActivationType(std::shared_ptr tensor) +OH_NN_ReturnCode L2NormalizeBuilder::SetActivationType(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT8) { LOGE("[L2Normalize] SetActivationType failed, the activationType should have type OH_NN_INT8."); @@ -136,19 +136,11 @@ OH_NN_ReturnCode L2NormalizeBuilder::Build(const std::vector& paramsIn for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_L2_NORMALIZE_AXIS: - ret = SetAxis(tensor); - break; - case OH_NN_L2_NORMALIZE_EPSILON: - ret = SetEpsilon(tensor); - break; - case OH_NN_L2_NORMALIZE_ACTIVATION_TYPE: - ret = SetActivationType(tensor); - break; - default: - LOGE("[L2Normalize] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[L2Normalize] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.h b/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.h index 271d8d62f6be85ac69644cb4a611c98807a29c9c..f4d5d8cb8844d7132fbfc9c328ce430676d21e12 100644 --- a/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.h +++ 
b/frameworks/native/neural_network_runtime/ops/l2_normalize_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class L2NormalizeBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (L2NormalizeBuilder::*FuncPtr)(const std::shared_ptr&); + L2NormalizeBuilder(); ~L2NormalizeBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -34,14 +36,19 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); - OH_NN_ReturnCode SetEpsilon(std::shared_ptr tensor); - OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetEpsilon(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetActivationType(const std::shared_ptr& tensor); private: std::vector m_axis; float m_epsilon {1e-6}; mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + std::unordered_map m_paramMap = { + {OH_NN_L2_NORMALIZE_ACTIVATION_TYPE, &L2NormalizeBuilder::SetActivationType}, + {OH_NN_L2_NORMALIZE_EPSILON, &L2NormalizeBuilder::SetEpsilon}, + {OH_NN_L2_NORMALIZE_AXIS, &L2NormalizeBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/layernorm_builder.cpp b/frameworks/native/neural_network_runtime/ops/layernorm_builder.cpp index 63e1446f80696f6baf5ddccfea032c1620d05be6..26cb483cf61f7dfe36737f512a0ae2c7f2577a84 100644 --- a/frameworks/native/neural_network_runtime/ops/layernorm_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/layernorm_builder.cpp @@ -33,7 +33,7 @@ LayerNormBuilder::LayerNormBuilder() {} LayerNormBuilder::~LayerNormBuilder() {} -OH_NN_ReturnCode LayerNormBuilder::SetBeginNormAxis(std::shared_ptr tensor) +OH_NN_ReturnCode LayerNormBuilder::SetBeginNormAxis(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != 
OH_NN_INT64) { @@ -56,7 +56,7 @@ OH_NN_ReturnCode LayerNormBuilder::SetBeginNormAxis(std::shared_ptr te return OH_NN_SUCCESS; } -OH_NN_ReturnCode LayerNormBuilder::SetEpsilon(std::shared_ptr tensor) +OH_NN_ReturnCode LayerNormBuilder::SetEpsilon(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_FLOAT32) { @@ -79,7 +79,7 @@ OH_NN_ReturnCode LayerNormBuilder::SetEpsilon(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LayerNormBuilder::SetBeginParamsAxis(std::shared_ptr tensor) +OH_NN_ReturnCode LayerNormBuilder::SetBeginParamsAxis(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { @@ -129,19 +129,11 @@ OH_NN_ReturnCode LayerNormBuilder::Build(const std::vector& paramsInde for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_LAYER_NORM_BEGIN_NORM_AXIS: - returnCode = SetBeginNormAxis(tensor); - break; - case OH_NN_LAYER_NORM_EPSILON: - returnCode = SetEpsilon(tensor); - break; - case OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS: - returnCode = SetBeginParamsAxis(tensor); - break; - default: - LOGE("[LayerNormBuilder] Parameter Type is invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[LayerNormBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/layernorm_builder.h b/frameworks/native/neural_network_runtime/ops/layernorm_builder.h index 74b6bd6fca6512046a50e9a444328163c167903d..85708dcf675778adcfd0d4f28993ab211d7a5eb5 100644 --- a/frameworks/native/neural_network_runtime/ops/layernorm_builder.h +++ b/frameworks/native/neural_network_runtime/ops/layernorm_builder.h @@ -23,6 +23,8 @@ 
namespace NeuralNetworkRuntime { namespace Ops { class LayerNormBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (LayerNormBuilder::*FuncPtr)(const std::shared_ptr&); + LayerNormBuilder(); ~LayerNormBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -32,9 +34,9 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetBeginNormAxis(std::shared_ptr tensor); - OH_NN_ReturnCode SetEpsilon(std::shared_ptr tensor); - OH_NN_ReturnCode SetBeginParamsAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetBeginNormAxis(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetEpsilon(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetBeginParamsAxis(const std::shared_ptr& tensor); OH_NN_ReturnCode ValidateGammaAndBetaShape(const std::vector& inputsIndex, int64_t beginAxis, const std::vector>& allTensors) const; @@ -43,6 +45,11 @@ private: float m_epsilon {1e-7}; bool m_elementwiseAffine {true}; int64_t m_beginParamsAxis {1}; + std::unordered_map m_paramMap = { + {OH_NN_LAYER_NORM_BEGIN_NORM_AXIS, &LayerNormBuilder::SetBeginNormAxis}, + {OH_NN_LAYER_NORM_EPSILON, &LayerNormBuilder::SetEpsilon}, + {OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS, &LayerNormBuilder::SetBeginParamsAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp index 11fc65cac7ca5513b18021077c5ed01082c3d647..6f5dd5a98d4f768284c1b2abdb0af4416ac3faca 100644 --- a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.cpp @@ -28,7 +28,7 @@ LeakyReluBuilder::LeakyReluBuilder() {} LeakyReluBuilder::~LeakyReluBuilder() {} -OH_NN_ReturnCode LeakyReluBuilder::SetNegativeSlope(std::shared_ptr tensor) +OH_NN_ReturnCode LeakyReluBuilder::SetNegativeSlope(const std::shared_ptr& tensor) { if (tensor->GetDataType() != 
OH_NN_FLOAT32) { LOGE("[LeakyRelu] The negativeSlope should be type OH_NN_FLOAT32."); @@ -78,13 +78,11 @@ OH_NN_ReturnCode LeakyReluBuilder::Build(const std::vector& paramsInde for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_LEAKY_RELU_NEGATIVE_SLOPE: - ret = SetNegativeSlope(tensor); - break; - default: - LOGE("[LeakyRelu] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[LeakyRelu] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.h b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.h index cd17bcc04f7d60d28137eafd1c08d447f7e88709..f311863a4548379ccc5a372a5799d9acd7b6c9e3 100644 --- a/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.h +++ b/frameworks/native/neural_network_runtime/ops/leaky_relu_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class LeakyReluBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (LeakyReluBuilder::*FuncPtr)(const std::shared_ptr&); + LeakyReluBuilder(); ~LeakyReluBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,10 +38,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetNegativeSlope(std::shared_ptr tensor); + OH_NN_ReturnCode SetNegativeSlope(const std::shared_ptr& tensor); private: float m_negativeSlope {0.0f}; + std::unordered_map m_paramMap = { + {OH_NN_LEAKY_RELU_NEGATIVE_SLOPE, &LeakyReluBuilder::SetNegativeSlope} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/log_softmax_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/log_softmax_builder.cpp index 7f0139f75cedeff44730f89dfe503922ffd5fb32..839577ef81d140d3af64b4a3e443f2fe37de4b60 100644 --- a/frameworks/native/neural_network_runtime/ops/log_softmax_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/log_softmax_builder.cpp @@ -31,7 +31,7 @@ LogSoftmaxBuilder::LogSoftmaxBuilder() {} LogSoftmaxBuilder::~LogSoftmaxBuilder() {} -OH_NN_ReturnCode LogSoftmaxBuilder::SetAxis(std::shared_ptr& tensor) +OH_NN_ReturnCode LogSoftmaxBuilder::SetAxis(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[LogSoftmax] The axis should be type OH_NN_INT64."); @@ -81,13 +81,11 @@ OH_NN_ReturnCode LogSoftmaxBuilder::Build(const std::vector& paramsInd for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_LOG_SOFTMAX_AXIS: - ret = SetAxis(tensor); - break; - default: - LOGE("[LogSoftmax] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[LogSoftmax] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/log_softmax_builder.h b/frameworks/native/neural_network_runtime/ops/log_softmax_builder.h index b5e8b77459b65b5c9e6ecba070ebbc6777cc811d..4f030bd8bd6a9b952abec0b7467592b1d8f62f9e 100644 --- a/frameworks/native/neural_network_runtime/ops/log_softmax_builder.h +++ b/frameworks/native/neural_network_runtime/ops/log_softmax_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class LogSoftmaxBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (LogSoftmaxBuilder::*FuncPtr)(const std::shared_ptr&); + LogSoftmaxBuilder(); ~LogSoftmaxBuilder() 
override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -33,10 +35,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr& tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); private: int64_t m_axis {0}; + std::unordered_map m_paramMap = { + {OH_NN_LOG_SOFTMAX_AXIS, &LogSoftmaxBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/lrn_builder.cpp b/frameworks/native/neural_network_runtime/ops/lrn_builder.cpp index 13fc2c88d8395defa85c7363d3dc68139466e631..ffc6fad0d241272b226acb2a61a74220ffb61795 100644 --- a/frameworks/native/neural_network_runtime/ops/lrn_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/lrn_builder.cpp @@ -32,7 +32,7 @@ LRNBuilder::LRNBuilder() {} LRNBuilder::~LRNBuilder() {} -OH_NN_ReturnCode LRNBuilder::SetDepthRadius(std::shared_ptr tensor) +OH_NN_ReturnCode LRNBuilder::SetDepthRadius(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[LRN] The depthRadius should be type OH_NN_INT64."); @@ -54,7 +54,7 @@ OH_NN_ReturnCode LRNBuilder::SetDepthRadius(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LRNBuilder::SetBias(std::shared_ptr tensor) +OH_NN_ReturnCode LRNBuilder::SetBias(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[LRN] The bias should be type OH_NN_FLOAT32."); @@ -76,7 +76,7 @@ OH_NN_ReturnCode LRNBuilder::SetBias(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LRNBuilder::SetAlpha(std::shared_ptr tensor) +OH_NN_ReturnCode LRNBuilder::SetAlpha(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[LRN] The alpha should be type OH_NN_FLOAT32."); @@ -98,7 +98,7 @@ OH_NN_ReturnCode LRNBuilder::SetAlpha(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LRNBuilder::SetBeta(std::shared_ptr tensor) 
+OH_NN_ReturnCode LRNBuilder::SetBeta(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[LRN] The beta should be type OH_NN_FLOAT32."); @@ -120,7 +120,7 @@ OH_NN_ReturnCode LRNBuilder::SetBeta(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LRNBuilder::SetNormRegion(std::shared_ptr tensor) +OH_NN_ReturnCode LRNBuilder::SetNormRegion(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT32) { LOGE("[LRN] The normRegion should be type OH_NN_INT32."); @@ -178,25 +178,11 @@ OH_NN_ReturnCode LRNBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_LRN_DEPTH_RADIUS: - ret = SetDepthRadius(tensor); - break; - case OH_NN_LRN_BIAS: - ret = SetBias(tensor); - break; - case OH_NN_LRN_ALPHA: - ret = SetAlpha(tensor); - break; - case OH_NN_LRN_BETA: - ret = SetBeta(tensor); - break; - case OH_NN_LRN_NORM_REGION: - ret = SetNormRegion(tensor); - break; - default: - LOGE("[LRN] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[LRN] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/lrn_builder.h b/frameworks/native/neural_network_runtime/ops/lrn_builder.h index c5cf81e5113a7c669ff990aa83b4b9289e75724c..845f11430df82c5041cb4a98504c5f19e480b8a7 100644 --- a/frameworks/native/neural_network_runtime/ops/lrn_builder.h +++ b/frameworks/native/neural_network_runtime/ops/lrn_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class LRNBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (LRNBuilder::*FuncPtr)(const std::shared_ptr&); + 
LRNBuilder(); ~LRNBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -33,11 +35,11 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetDepthRadius(std::shared_ptr tensor); - OH_NN_ReturnCode SetBias(std::shared_ptr tensor); - OH_NN_ReturnCode SetAlpha(std::shared_ptr tensor); - OH_NN_ReturnCode SetBeta(std::shared_ptr tensor); - OH_NN_ReturnCode SetNormRegion(std::shared_ptr tensor); + OH_NN_ReturnCode SetDepthRadius(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetBias(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetAlpha(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetBeta(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetNormRegion(const std::shared_ptr& tensor); private: int64_t m_depthRadius {0}; @@ -45,6 +47,13 @@ private: float m_alpha {0.0f}; float m_beta {0.0f}; std::string m_normRegion {"ACROSS_CHANNELS"}; + std::unordered_map m_paramMap = { + {OH_NN_LRN_ALPHA, &LRNBuilder::SetAlpha}, + {OH_NN_LRN_DEPTH_RADIUS, &LRNBuilder::SetDepthRadius}, + {OH_NN_LRN_BIAS, &LRNBuilder::SetBias}, + {OH_NN_LRN_BETA, &LRNBuilder::SetBeta}, + {OH_NN_LRN_NORM_REGION, &LRNBuilder::SetNormRegion} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/lstm_builder.cpp b/frameworks/native/neural_network_runtime/ops/lstm_builder.cpp index 5273ea82cc524ce8396db54610eafb64bae98fbf..6104dfdd7c8a655b1b1773f98742f99829684c15 100644 --- a/frameworks/native/neural_network_runtime/ops/lstm_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/lstm_builder.cpp @@ -28,7 +28,7 @@ LSTMBuilder::LSTMBuilder() {} LSTMBuilder::~LSTMBuilder() {} -OH_NN_ReturnCode LSTMBuilder::SetBidirectional(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetBidirectional(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[LSTM] The bidirectional should be type OH_NN_BOOL."); @@ -50,7 +50,7 @@ OH_NN_ReturnCode 
LSTMBuilder::SetBidirectional(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetHasBias(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetHasBias(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[LSTM] The hasBias should be type OH_NN_BOOL."); @@ -72,7 +72,7 @@ OH_NN_ReturnCode LSTMBuilder::SetHasBias(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetInputSize(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetInputSize(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[LSTM] The inputSize should be type OH_NN_INT64."); @@ -94,7 +94,7 @@ OH_NN_ReturnCode LSTMBuilder::SetInputSize(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetHiddenSize(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetHiddenSize(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[LSTM] The hiddenSize should be type OH_NN_INT64."); @@ -116,7 +116,7 @@ OH_NN_ReturnCode LSTMBuilder::SetHiddenSize(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetNumLayers(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetNumLayers(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[LSTM] The numLayers should be type OH_NN_INT64."); @@ -138,7 +138,7 @@ OH_NN_ReturnCode LSTMBuilder::SetNumLayers(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetNumDirections(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetNumDirections(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[LSTM] The numDirections should be type OH_NN_INT64."); @@ -160,7 +160,7 @@ OH_NN_ReturnCode LSTMBuilder::SetNumDirections(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetDropout(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetDropout(const 
std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[LSTM] The dropout should be type OH_NN_FLOAT32."); @@ -182,7 +182,7 @@ OH_NN_ReturnCode LSTMBuilder::SetDropout(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetZoneoutCell(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetZoneoutCell(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[LSTM] The zoneoutCell should be type OH_NN_FLOAT32."); @@ -204,7 +204,7 @@ OH_NN_ReturnCode LSTMBuilder::SetZoneoutCell(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetZoneoutHidden(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetZoneoutHidden(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[LSTM] The zoneoutHidden should be type OH_NN_FLOAT32."); @@ -226,7 +226,7 @@ OH_NN_ReturnCode LSTMBuilder::SetZoneoutHidden(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode LSTMBuilder::SetProjSize(std::shared_ptr tensor) +OH_NN_ReturnCode LSTMBuilder::SetProjSize(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[LSTM] The projSize should be type OH_NN_INT64."); @@ -255,41 +255,13 @@ OH_NN_ReturnCode LSTMBuilder::ParseParam(const std::vector& paramsInde for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_LSTM_BIDIRECTIONAL: - returnCode = SetBidirectional(tensor); - break; - case OH_NN_LSTM_HAS_BIAS: - returnCode = SetHasBias(tensor); - break; - case OH_NN_LSTM_INPUT_SIZE: - returnCode = SetInputSize(tensor); - break; - case OH_NN_LSTM_HIDDEN_SIZE: - returnCode = SetHiddenSize(tensor); - break; - case OH_NN_LSTM_NUM_LAYERS: - returnCode = SetNumLayers(tensor); - break; - case OH_NN_LSTM_NUM_DIRECTIONS: - returnCode = SetNumDirections(tensor); - break; - case OH_NN_LSTM_DROPOUT: - returnCode = SetDropout(tensor); - break; - 
case OH_NN_LSTM_ZONEOUT_CELL: - returnCode = SetZoneoutCell(tensor); - break; - case OH_NN_LSTM_ZONEOUT_HIDDEN: - returnCode = SetZoneoutHidden(tensor); - break; - case OH_NN_LSTM_PROJ_SIZE: - returnCode = SetProjSize(tensor); - break; - default: - LOGE("[LSTM] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[lSTM] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[LSTM] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/lstm_builder.h b/frameworks/native/neural_network_runtime/ops/lstm_builder.h index 075c74b41e7239fac98f4ca9c2f15c1e293b6864..efb00c22cf8cff0d71f0cdd8c388d427ead49aa3 100644 --- a/frameworks/native/neural_network_runtime/ops/lstm_builder.h +++ b/frameworks/native/neural_network_runtime/ops/lstm_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class LSTMBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (LSTMBuilder::*FuncPtr)(const std::shared_ptr&); + LSTMBuilder(); ~LSTMBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -35,16 +37,16 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetBidirectional(std::shared_ptr tensor); - OH_NN_ReturnCode SetHasBias(std::shared_ptr tensor); - OH_NN_ReturnCode SetInputSize(std::shared_ptr tensor); - OH_NN_ReturnCode SetHiddenSize(std::shared_ptr tensor); - OH_NN_ReturnCode SetNumLayers(std::shared_ptr tensor); - OH_NN_ReturnCode SetNumDirections(std::shared_ptr tensor); - OH_NN_ReturnCode SetDropout(std::shared_ptr tensor); - OH_NN_ReturnCode SetZoneoutCell(std::shared_ptr tensor); - OH_NN_ReturnCode SetZoneoutHidden(std::shared_ptr tensor); - OH_NN_ReturnCode 
SetProjSize(std::shared_ptr tensor); + OH_NN_ReturnCode SetBidirectional(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetHasBias(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetInputSize(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetHiddenSize(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetNumLayers(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetNumDirections(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetDropout(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetZoneoutCell(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetZoneoutHidden(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetProjSize(const std::shared_ptr& tensor); OH_NN_ReturnCode ParseParam(const std::vector& paramsIndex, const std::vector>& allTensors); @@ -59,6 +61,18 @@ private: float m_zoneoutCell {0.0f}; float m_zoneoutHidden {0.0f}; int64_t m_projSize {0}; + std::unordered_map m_paramMap = { + {OH_NN_LSTM_BIDIRECTIONAL, &LSTMBuilder::SetBidirectional}, + {OH_NN_LSTM_HAS_BIAS, &LSTMBuilder::SetHasBias}, + {OH_NN_LSTM_INPUT_SIZE, &LSTMBuilder::SetInputSize}, + {OH_NN_LSTM_HIDDEN_SIZE, &LSTMBuilder::SetHiddenSize}, + {OH_NN_LSTM_NUM_LAYERS, &LSTMBuilder::SetNumLayers}, + {OH_NN_LSTM_NUM_DIRECTIONS, &LSTMBuilder::SetNumDirections}, + {OH_NN_LSTM_DROPOUT, &LSTMBuilder::SetDropout}, + {OH_NN_LSTM_ZONEOUT_CELL, &LSTMBuilder::SetZoneoutCell}, + {OH_NN_LSTM_ZONEOUT_HIDDEN, &LSTMBuilder::SetZoneoutHidden}, + {OH_NN_LSTM_PROJ_SIZE, &LSTMBuilder::SetProjSize} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/matmul_builder.cpp b/frameworks/native/neural_network_runtime/ops/matmul_builder.cpp index 5913934bb3f6e8e24474361cae9cf15474049cf5..909cf79fcd6a7faa55026bdcbdb5f8c5461eb87b 100644 --- a/frameworks/native/neural_network_runtime/ops/matmul_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/matmul_builder.cpp @@ -32,7 +32,7 @@ MatmulBuilder::MatmulBuilder() {} 
MatmulBuilder::~MatmulBuilder() {} -OH_NN_ReturnCode MatmulBuilder::SetTransposeA(std::shared_ptr tensor) +OH_NN_ReturnCode MatmulBuilder::SetTransposeA(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -55,7 +55,7 @@ OH_NN_ReturnCode MatmulBuilder::SetTransposeA(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode MatmulBuilder::SetTransposeB(std::shared_ptr tensor) +OH_NN_ReturnCode MatmulBuilder::SetTransposeB(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -78,7 +78,7 @@ OH_NN_ReturnCode MatmulBuilder::SetTransposeB(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode MatmulBuilder::SetActivationType(std::shared_ptr tensor) +OH_NN_ReturnCode MatmulBuilder::SetActivationType(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -135,19 +135,11 @@ OH_NN_ReturnCode MatmulBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_MATMUL_TRANSPOSE_A: - returnCode = SetTransposeA(tensor); - break; - case OH_NN_MATMUL_TRANSPOSE_B: - returnCode = SetTransposeB(tensor); - break; - case OH_NN_MATMUL_ACTIVATION_TYPE: - returnCode = SetActivationType(tensor); - break; - default: - LOGE("[Matmul] Parameter Type is invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Matmul] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/matmul_builder.h b/frameworks/native/neural_network_runtime/ops/matmul_builder.h index 
3d39f20e1ddfa106d10b69c2635134f825aef5ac..24d9182265b8f962251a34f84da598dbd4189e7a 100644 --- a/frameworks/native/neural_network_runtime/ops/matmul_builder.h +++ b/frameworks/native/neural_network_runtime/ops/matmul_builder.h @@ -25,6 +25,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class MatmulBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (MatmulBuilder::*FuncPtr)(const std::shared_ptr&); + MatmulBuilder(); ~MatmulBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -34,14 +36,19 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetTransposeA(std::shared_ptr tensor); - OH_NN_ReturnCode SetTransposeB(std::shared_ptr tensor); - OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + OH_NN_ReturnCode SetTransposeA(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetTransposeB(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetActivationType(const std::shared_ptr& tensor); private: mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; bool m_transposeA{false}; bool m_transposeB{false}; + std::unordered_map m_paramMap = { + {OH_NN_MATMUL_TRANSPOSE_A, &MatmulBuilder::SetTransposeA}, + {OH_NN_MATMUL_TRANSPOSE_B, &MatmulBuilder::SetTransposeB}, + {OH_NN_MATMUL_ACTIVATION_TYPE, &MatmulBuilder::SetActivationType} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/mul_builder.cpp b/frameworks/native/neural_network_runtime/ops/mul_builder.cpp index ff9290617dc4621a8db3bf71b69ecc39bcb8bd42..12c97ecad4b4e6078f74402088d06a70e80d9e35 100644 --- a/frameworks/native/neural_network_runtime/ops/mul_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/mul_builder.cpp @@ -32,7 +32,7 @@ MulBuilder::MulBuilder() {} MulBuilder::~MulBuilder() {} -OH_NN_ReturnCode MulBuilder::SetActivationType(std::shared_ptr tensor) +OH_NN_ReturnCode MulBuilder::SetActivationType(const 
std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { diff --git a/frameworks/native/neural_network_runtime/ops/mul_builder.h b/frameworks/native/neural_network_runtime/ops/mul_builder.h index 62c0b240eda723d38169f446e0b995ffe3d684cc..977ccbadab85392cb466a165eec21ee2bbc700ba 100644 --- a/frameworks/native/neural_network_runtime/ops/mul_builder.h +++ b/frameworks/native/neural_network_runtime/ops/mul_builder.h @@ -25,6 +25,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class MulBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (MulBuilder::*FuncPtr)(const std::shared_ptr&); + MulBuilder(); ~MulBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -34,10 +36,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + OH_NN_ReturnCode SetActivationType(const std::shared_ptr& tensor); private: mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + std::unordered_map m_paramMap = { + {OH_NN_MUL_ACTIVATION_TYPE, &MulBuilder::SetActivationType} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/onehot_builder.cpp b/frameworks/native/neural_network_runtime/ops/onehot_builder.cpp index 88474d50f193259446bb5344264dd4c415bc0cb1..7d5fdaf008a1667d1cf8679faa05d7086d2a5c53 100644 --- a/frameworks/native/neural_network_runtime/ops/onehot_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/onehot_builder.cpp @@ -31,7 +31,7 @@ OnehotBuilder::OnehotBuilder() {} OnehotBuilder::~OnehotBuilder() {} -OH_NN_ReturnCode OnehotBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode OnehotBuilder::SetAxis(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { @@ -76,13 +76,11 @@ OH_NN_ReturnCode OnehotBuilder::Build(const std::vector& 
paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_ONE_HOT_AXIS: - returnCode = SetAxis(tensor); - break; - default: - LOGE("[Onehot] Parameter Type is invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Onehot] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/onehot_builder.h b/frameworks/native/neural_network_runtime/ops/onehot_builder.h index cca7d01eae2ca285a22112a8cf168dc1941ef91b..c44d34ed67dd0b0605189e3262e53faed3a42a8e 100644 --- a/frameworks/native/neural_network_runtime/ops/onehot_builder.h +++ b/frameworks/native/neural_network_runtime/ops/onehot_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class OnehotBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (OnehotBuilder::*FuncPtr)(const std::shared_ptr&); + OnehotBuilder(); ~OnehotBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -32,10 +34,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); private: int64_t m_axis {-1}; + std::unordered_map m_paramMap = { + {OH_NN_ONE_HOT_AXIS, &OnehotBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/pad_builder.cpp b/frameworks/native/neural_network_runtime/ops/pad_builder.cpp index f62e0716d6b46f2289c99deb4fe2150a46ee844c..4d8a2ea56ba8709eb124db4b96589a0d2178ef8d 100644 --- a/frameworks/native/neural_network_runtime/ops/pad_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/pad_builder.cpp @@ -37,7 
+37,7 @@ PadBuilder::PadBuilder() {} PadBuilder::~PadBuilder() {} -OH_NN_ReturnCode PadBuilder::SetPaddingMode(std::shared_ptr tensor) +OH_NN_ReturnCode PadBuilder::SetPaddingMode(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -70,7 +70,7 @@ OH_NN_ReturnCode PadBuilder::SetPaddingMode(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode PadBuilder::SetConstantValue(std::shared_ptr tensor) +OH_NN_ReturnCode PadBuilder::SetConstantValue(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -120,16 +120,11 @@ OH_NN_ReturnCode PadBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_PAD_CONSTANT_VALUE: - returnCode = SetConstantValue(tensor); - break; - case OH_NN_PAD_PADDING_MODE: - returnCode = SetPaddingMode(tensor); - break; - default: - LOGE("[Pad] Parameter Type is invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Pad] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/pad_builder.h b/frameworks/native/neural_network_runtime/ops/pad_builder.h index ca523af6862623c70d1b5b253c944ab0d7a85abf..c5b58c8c1bca3116065fae3220e98d3df0da32f0 100644 --- a/frameworks/native/neural_network_runtime/ops/pad_builder.h +++ b/frameworks/native/neural_network_runtime/ops/pad_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class PadBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (PadBuilder::*FuncPtr)(const std::shared_ptr&); + PadBuilder(); ~PadBuilder() override; OH_NN_ReturnCode 
Build(const std::vector& paramsIndex, @@ -33,12 +35,16 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetConstantValue(std::shared_ptr tensor); - OH_NN_ReturnCode SetPaddingMode(std::shared_ptr tensor); + OH_NN_ReturnCode SetConstantValue(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetPaddingMode(const std::shared_ptr& tensor); private: float m_constantValue {0.0f}; mindspore::lite::PaddingMode m_paddingMode {mindspore::lite::PADDING_MODE_CONSTANT}; + std::unordered_map m_paramMap = { + {OH_NN_PAD_CONSTANT_VALUE, &PadBuilder::SetConstantValue}, + {OH_NN_PAD_PADDING_MODE, &PadBuilder::SetPaddingMode} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/pooling_builder.cpp b/frameworks/native/neural_network_runtime/ops/pooling_builder.cpp index 42131d38d3e541f1918423eed603312c6c5f0b10..bb3a506d2d1b76304cc8c85b1dddeab0df272746 100644 --- a/frameworks/native/neural_network_runtime/ops/pooling_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/pooling_builder.cpp @@ -57,37 +57,13 @@ OH_NN_ReturnCode PoolingBuilder::PoolingBuild(const std::vector& param for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_AVG_POOL_KERNEL_SIZE: - case OH_NN_MAX_POOL_KERNEL_SIZE: - returnCode = SetKernel(tensor); - break; - case OH_NN_AVG_POOL_STRIDE: - case OH_NN_MAX_POOL_STRIDE: - returnCode = SetStrides(tensor); - break; - case OH_NN_AVG_POOL_PAD_MODE: - case OH_NN_MAX_POOL_PAD_MODE: - case OH_NN_MAX_POOL_PAD: - case OH_NN_AVG_POOL_PAD: - returnCode = SetPadModeOrPaddings(tensor); - break; - case OH_NN_AVG_POOL_ROUND_MODE: - case OH_NN_MAX_POOL_ROUND_MODE: - returnCode = SetRoundMode(tensor); - break; - case OH_NN_AVG_POOL_ACTIVATION_TYPE: - case OH_NN_MAX_POOL_ACTIVATION_TYPE: - returnCode = SetActivation(tensor); - break; - case OH_NN_AVG_POOL_GLOBAL: - case OH_NN_MAX_POOL_GLOBAL: - returnCode = 
SetGlobal(tensor); - break; - default: - LOGE("[PoolingBuilder] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[PoolingBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[PoolingBuilder] PoolingBuild failed, passed invalid param."); return returnCode; @@ -116,7 +92,7 @@ OH_NN_ReturnCode PoolingBuilder::SetInputAndOutput(const std::vector& return OH_NN_SUCCESS; } -OH_NN_ReturnCode PoolingBuilder::SetKernel(std::shared_ptr tensor) +OH_NN_ReturnCode PoolingBuilder::SetKernel(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set kernelSize @@ -138,7 +114,7 @@ OH_NN_ReturnCode PoolingBuilder::SetKernel(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode PoolingBuilder::SetStrides(std::shared_ptr tensor) +OH_NN_ReturnCode PoolingBuilder::SetStrides(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); // Set Strides @@ -160,7 +136,7 @@ OH_NN_ReturnCode PoolingBuilder::SetStrides(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode PoolingBuilder::SetPadModeOrPaddings(std::shared_ptr tensor) +OH_NN_ReturnCode PoolingBuilder::SetPadModeOrPaddings(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -204,7 +180,7 @@ OH_NN_ReturnCode PoolingBuilder::SetPadModeOrPaddings(std::shared_ptr return OH_NN_SUCCESS; } -OH_NN_ReturnCode PoolingBuilder::SetRoundMode(std::shared_ptr tensor) +OH_NN_ReturnCode PoolingBuilder::SetRoundMode(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -237,7 +213,7 @@ OH_NN_ReturnCode PoolingBuilder::SetRoundMode(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode PoolingBuilder::SetActivation(std::shared_ptr tensor) +OH_NN_ReturnCode 
PoolingBuilder::SetActivation(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); @@ -268,7 +244,7 @@ OH_NN_ReturnCode PoolingBuilder::SetActivation(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode PoolingBuilder::SetGlobal(std::shared_ptr tensor) +OH_NN_ReturnCode PoolingBuilder::SetGlobal(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[PoolingBuilder] The global should be type OH_NN_BOOL."); diff --git a/frameworks/native/neural_network_runtime/ops/pooling_builder.h b/frameworks/native/neural_network_runtime/ops/pooling_builder.h index 3f020d8a096756867f401379e19632f60c1afa6d..681685f076fbe5b60b0b89f7e88ddc2247b821b1 100644 --- a/frameworks/native/neural_network_runtime/ops/pooling_builder.h +++ b/frameworks/native/neural_network_runtime/ops/pooling_builder.h @@ -25,6 +25,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class PoolingBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (PoolingBuilder::*FuncPtr)(const std::shared_ptr&); + PoolingBuilder() = default; virtual ~PoolingBuilder() = default; @@ -37,12 +39,12 @@ public: const std::vector& outputsIndex, const std::vector>& allTensors); - OH_NN_ReturnCode SetKernel(std::shared_ptr tensor); - OH_NN_ReturnCode SetStrides(std::shared_ptr tensor); - OH_NN_ReturnCode SetPadModeOrPaddings(std::shared_ptr tensor); - OH_NN_ReturnCode SetRoundMode(std::shared_ptr tensor); - OH_NN_ReturnCode SetActivation(std::shared_ptr tensor); - OH_NN_ReturnCode SetGlobal(std::shared_ptr tensor); + OH_NN_ReturnCode SetKernel(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetStrides(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetPadModeOrPaddings(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetRoundMode(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetActivation(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetGlobal(const std::shared_ptr& tensor); protected: std::vector m_kernelSize; @@ -53,6 +55,23 @@ protected: 
mindspore::lite::RoundMode m_roundMode {mindspore::lite::ROUND_MODE_FLOOR}; mindspore::lite::Format m_format {mindspore::lite::FORMAT_NCHW}; bool m_global {false}; + std::unordered_map m_paramMap = { + {OH_NN_MAX_POOL_KERNEL_SIZE, &PoolingBuilder::SetKernel}, + {OH_NN_MAX_POOL_STRIDE, &PoolingBuilder::SetStrides}, + {OH_NN_MAX_POOL_PAD_MODE, &PoolingBuilder::SetPadModeOrPaddings}, + {OH_NN_MAX_POOL_PAD, &PoolingBuilder::SetPadModeOrPaddings}, + {OH_NN_MAX_POOL_ACTIVATION_TYPE, &PoolingBuilder::SetActivation}, + {OH_NN_MAX_POOL_ROUND_MODE, &PoolingBuilder::SetRoundMode}, + {OH_NN_MAX_POOL_GLOBAL, &PoolingBuilder::SetGlobal}, + + {OH_NN_AVG_POOL_KERNEL_SIZE, &PoolingBuilder::SetKernel}, + {OH_NN_AVG_POOL_STRIDE, &PoolingBuilder::SetStrides}, + {OH_NN_AVG_POOL_PAD_MODE, &PoolingBuilder::SetPadModeOrPaddings}, + {OH_NN_AVG_POOL_PAD, &PoolingBuilder::SetPadModeOrPaddings}, + {OH_NN_AVG_POOL_ACTIVATION_TYPE, &PoolingBuilder::SetActivation}, + {OH_NN_AVG_POOL_ROUND_MODE, &PoolingBuilder::SetRoundMode}, + {OH_NN_AVG_POOL_GLOBAL, &PoolingBuilder::SetGlobal} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/pow_builder.cpp b/frameworks/native/neural_network_runtime/ops/pow_builder.cpp index 20cc8a7a9f0ea5b360c3351c753d344053a28286..fd5d765414933d341ad0bc6323c14c002e524c2f 100644 --- a/frameworks/native/neural_network_runtime/ops/pow_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/pow_builder.cpp @@ -31,7 +31,7 @@ PowBuilder::PowBuilder() {} PowBuilder::~PowBuilder() {} -OH_NN_ReturnCode PowBuilder::SetScale(std::shared_ptr tensor) +OH_NN_ReturnCode PowBuilder::SetScale(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[Pow] The scale should be type OH_NN_FLOAT32."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode PowBuilder::SetScale(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode PowBuilder::SetShift(std::shared_ptr tensor) +OH_NN_ReturnCode 
PowBuilder::SetShift(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[Pow] The shift should be type OH_NN_FLOAT32."); @@ -104,16 +104,11 @@ OH_NN_ReturnCode PowBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_POW_SCALE: - returnCode = SetScale(tensor); - break; - case OH_NN_POW_SHIFT: - returnCode = SetShift(tensor); - break; - default: - LOGE("[Pow] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Pow] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/pow_builder.h b/frameworks/native/neural_network_runtime/ops/pow_builder.h index 2b58d86b9d1262e14f2f796a4fddac871e27ef95..78d1ba244edef8a162fc2da719aa8b59ff0e1991 100644 --- a/frameworks/native/neural_network_runtime/ops/pow_builder.h +++ b/frameworks/native/neural_network_runtime/ops/pow_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class PowBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (PowBuilder::*FuncPtr)(const std::shared_ptr&); + PowBuilder(); ~PowBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -33,12 +35,16 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetScale(std::shared_ptr tensor); - OH_NN_ReturnCode SetShift(std::shared_ptr tensor); + OH_NN_ReturnCode SetScale(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetShift(const std::shared_ptr& tensor); private: float m_scale {1.0f}; float m_shift {0.0f}; + std::unordered_map m_paramMap = { + {OH_NN_POW_SCALE, &PowBuilder::SetScale}, + 
{OH_NN_POW_SHIFT, &PowBuilder::SetShift} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.cpp b/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.cpp index 7b693c60ed430b0c5cde4a0523caee494faaebc1..c45c1463c6a5079453f951814797bb8fdb9dafcf 100644 --- a/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.cpp @@ -30,7 +30,7 @@ QuantDTypeCastBuilder::QuantDTypeCastBuilder() {} QuantDTypeCastBuilder::~QuantDTypeCastBuilder() {} -OH_NN_ReturnCode QuantDTypeCastBuilder::SetSrcT(std::shared_ptr tensor) +OH_NN_ReturnCode QuantDTypeCastBuilder::SetSrcT(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { @@ -48,7 +48,7 @@ OH_NN_ReturnCode QuantDTypeCastBuilder::SetSrcT(std::shared_ptr tensor return OH_NN_SUCCESS; } -OH_NN_ReturnCode QuantDTypeCastBuilder::SetDstT(std::shared_ptr tensor) +OH_NN_ReturnCode QuantDTypeCastBuilder::SetDstT(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { @@ -66,7 +66,7 @@ OH_NN_ReturnCode QuantDTypeCastBuilder::SetDstT(std::shared_ptr tensor return OH_NN_SUCCESS; } -OH_NN_ReturnCode QuantDTypeCastBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode QuantDTypeCastBuilder::SetAxis(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { @@ -111,19 +111,11 @@ OH_NN_ReturnCode QuantDTypeCastBuilder::Build(const std::vector& param for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_QUANT_DTYPE_CAST_SRC_T: - returnCode = SetSrcT(tensor); - break; - case OH_NN_QUANT_DTYPE_CAST_DST_T: - returnCode = SetDstT(tensor); - break; - case OH_NN_QUANT_DTYPE_CAST_AXIS: - returnCode = SetAxis(tensor); - break; - default: 
- LOGE("[QuantDTypeCast] Build failed, parameter type is invalid. type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[QuantDTypeCast] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.h b/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.h index f85922be07f5f1252eddb5e95ceb46e5d7b0f530..0bd1fd2ca4c8ba06b7e147d303da5e904a1c09cb 100644 --- a/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.h +++ b/frameworks/native/neural_network_runtime/ops/quant_dtype_cast_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class QuantDTypeCastBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (QuantDTypeCastBuilder::*FuncPtr)(const std::shared_ptr&); + QuantDTypeCastBuilder(); ~QuantDTypeCastBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -33,14 +35,19 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetSrcT(std::shared_ptr tensor); - OH_NN_ReturnCode SetDstT(std::shared_ptr tensor); - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetSrcT(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetDstT(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); private: const uint64_t* m_src_t{nullptr}; const uint64_t* m_dst_t{nullptr}; int64_t m_axis {0}; + std::unordered_map m_paramMap = { + {OH_NN_QUANT_DTYPE_CAST_SRC_T, &QuantDTypeCastBuilder::SetSrcT}, + {OH_NN_QUANT_DTYPE_CAST_DST_T, &QuantDTypeCastBuilder::SetDstT}, + {OH_NN_QUANT_DTYPE_CAST_AXIS, &QuantDTypeCastBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git
a/frameworks/native/neural_network_runtime/ops/range_builder.cpp b/frameworks/native/neural_network_runtime/ops/range_builder.cpp index c2a6e7586cfcc207d1d891932e5be1a4b070760c..4484aa9dc8452b8ab8eec3f277e83c4be43087b6 100755 --- a/frameworks/native/neural_network_runtime/ops/range_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/range_builder.cpp @@ -28,7 +28,7 @@ RangeBuilder::RangeBuilder() {} RangeBuilder::~RangeBuilder() {} -OH_NN_ReturnCode RangeBuilder::SetStart(std::shared_ptr tensor) +OH_NN_ReturnCode RangeBuilder::SetStart(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[Range] The start should be type OH_NN_INT64."); @@ -50,7 +50,7 @@ OH_NN_ReturnCode RangeBuilder::SetStart(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode RangeBuilder::SetLimit(std::shared_ptr tensor) +OH_NN_ReturnCode RangeBuilder::SetLimit(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[Range] The limit should be type OH_NN_INT64."); @@ -72,7 +72,7 @@ OH_NN_ReturnCode RangeBuilder::SetLimit(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode RangeBuilder::SetDelta(std::shared_ptr tensor) +OH_NN_ReturnCode RangeBuilder::SetDelta(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[Range] The delta should be type OH_NN_INT64."); @@ -122,19 +122,11 @@ OH_NN_ReturnCode RangeBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_RANGE_START: - ret = SetStart(tensor); - break; - case OH_NN_RANGE_LIMIT: - ret = SetLimit(tensor); - break; - case OH_NN_RANGE_DELTA: - ret = SetDelta(tensor); - break; - default: - LOGE("[Range] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = 
(this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Range] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/range_builder.h b/frameworks/native/neural_network_runtime/ops/range_builder.h index 8bc33f142c82eb579b01e612966ea7ddcdc9a059..4ac5ab58e433a3fd98973759ba205cead7fc2af3 100755 --- a/frameworks/native/neural_network_runtime/ops/range_builder.h +++ b/frameworks/native/neural_network_runtime/ops/range_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class RangeBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (RangeBuilder::*FuncPtr)(const std::shared_ptr&); + RangeBuilder(); ~RangeBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,14 +38,19 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetStart(std::shared_ptr tensor); - OH_NN_ReturnCode SetLimit(std::shared_ptr tensor); - OH_NN_ReturnCode SetDelta(std::shared_ptr tensor); + OH_NN_ReturnCode SetStart(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetLimit(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetDelta(const std::shared_ptr& tensor); private: int64_t m_start {0}; int64_t m_limit {0}; int64_t m_delta {1}; + std::unordered_map m_paramMap = { + {OH_NN_RANGE_START, &RangeBuilder::SetStart}, + {OH_NN_RANGE_LIMIT, &RangeBuilder::SetLimit}, + {OH_NN_RANGE_DELTA, &RangeBuilder::SetDelta} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reduceL2_builder.cpp b/frameworks/native/neural_network_runtime/ops/reduceL2_builder.cpp index e2d7ffbff5991591ce3c67c97a22455c1aa488c9..19c7148247ffabdae2911167997f5d257a0437e9 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceL2_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reduceL2_builder.cpp @@ -31,7 +31,7 @@ 
ReduceL2Builder::ReduceL2Builder() {} ReduceL2Builder::~ReduceL2Builder() {} -OH_NN_ReturnCode ReduceL2Builder::SetCoeff(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceL2Builder::SetCoeff(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[ReduceL2] The coeff should be type OH_NN_FLOAT32."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode ReduceL2Builder::SetCoeff(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceL2Builder::SetReduceToEnd(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceL2Builder::SetReduceToEnd(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceL2] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); @@ -76,7 +76,7 @@ OH_NN_ReturnCode ReduceL2Builder::SetReduceToEnd(std::shared_ptr tenso return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceL2Builder::SetKeepDims(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceL2Builder::SetKeepDims(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceL2] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); @@ -126,19 +126,11 @@ OH_NN_ReturnCode ReduceL2Builder::Build(const std::vector& paramsIndex for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_REDUCE_L2_KEEP_DIMS: - returnCode = SetKeepDims(tensor); - break; - case OH_NN_REDUCE_L2_REDUCE_TO_END: - returnCode = SetReduceToEnd(tensor); - break; - case OH_NN_REDUCE_L2_COEFF: - returnCode = SetCoeff(tensor); - break; - default: - LOGE("[ReduceL2] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ReduceL2] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/reduceL2_builder.h b/frameworks/native/neural_network_runtime/ops/reduceL2_builder.h index 9c61bd244c214b50155a41b750a94fc2956813a5..5672284519a4fb13a3eca3b44bd594429038dfd3 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceL2_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reduceL2_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceL2Builder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (ReduceL2Builder::*FuncPtr)(const std::shared_ptr&); + ReduceL2Builder(); ~ReduceL2Builder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -33,14 +35,19 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); - OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); - OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + OH_NN_ReturnCode SetCoeff(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetReduceToEnd(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetKeepDims(const std::shared_ptr& tensor); private: float m_coeff {0.0f}; bool m_reduceToEnd {false}; bool m_keepDims {false}; + std::unordered_map m_paramMap = { + {OH_NN_REDUCE_L2_COEFF, &ReduceL2Builder::SetCoeff}, + {OH_NN_REDUCE_L2_REDUCE_TO_END, &ReduceL2Builder::SetReduceToEnd}, + {OH_NN_REDUCE_L2_KEEP_DIMS, &ReduceL2Builder::SetKeepDims} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reduceall_builder.cpp b/frameworks/native/neural_network_runtime/ops/reduceall_builder.cpp index 
50eb5dec955d085b57b2d8912cfb6ad4a4d54599..81a7cf345ff5a84fa0dcf46986b5a6c2a715a07a 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceall_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reduceall_builder.cpp @@ -31,7 +31,7 @@ ReduceAllBuilder::ReduceAllBuilder() {} ReduceAllBuilder::~ReduceAllBuilder() {} -OH_NN_ReturnCode ReduceAllBuilder::SetCoeff(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceAllBuilder::SetCoeff(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[ReduceAll] The coeff should be type OH_NN_FLOAT32."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode ReduceAllBuilder::SetCoeff(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceAllBuilder::SetReduceToEnd(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceAllBuilder::SetReduceToEnd(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceAll] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); @@ -76,7 +76,7 @@ OH_NN_ReturnCode ReduceAllBuilder::SetReduceToEnd(std::shared_ptr tens return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceAllBuilder::SetKeepDims(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceAllBuilder::SetKeepDims(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceAll] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); @@ -126,19 +126,11 @@ OH_NN_ReturnCode ReduceAllBuilder::Build(const std::vector& paramsInde for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_REDUCE_ALL_KEEP_DIMS: - returnCode = SetKeepDims(tensor); - break; - case OH_NN_REDUCE_ALL_REDUCE_TO_END: - returnCode = SetReduceToEnd(tensor); - break; - case OH_NN_REDUCE_ALL_COEFF: - returnCode = SetCoeff(tensor); - break; - default: - LOGE("[ReduceAll] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ReduceAll] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/reduceall_builder.h b/frameworks/native/neural_network_runtime/ops/reduceall_builder.h index 8efd7c020925b3429c516cb5a95e52c45f9aaec0..9c48029229d13df5793fce88422ff45eb3a20f77 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceall_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reduceall_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceAllBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (ReduceAllBuilder::*FuncPtr)(const std::shared_ptr&); + ReduceAllBuilder(); ~ReduceAllBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -33,14 +35,19 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); - OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); - OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + OH_NN_ReturnCode SetCoeff(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetReduceToEnd(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetKeepDims(const std::shared_ptr& tensor); private: float m_coeff {0.0f}; bool m_reduceToEnd {false}; bool m_keepDims {false}; + std::unordered_map m_paramMap = { + {OH_NN_REDUCE_ALL_COEFF, &ReduceAllBuilder::SetCoeff}, + {OH_NN_REDUCE_ALL_REDUCE_TO_END, &ReduceAllBuilder::SetReduceToEnd}, + {OH_NN_REDUCE_ALL_KEEP_DIMS, &ReduceAllBuilder::SetKeepDims} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reducemax_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/reducemax_builder.cpp index 7b255c9c7ff031b6240bf5a176d3eb058a271e6c..cea4e04ffcca9e16074920f64402a007fb4a91f4 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemax_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reducemax_builder.cpp @@ -31,7 +31,7 @@ ReduceMaxBuilder::ReduceMaxBuilder() {} ReduceMaxBuilder::~ReduceMaxBuilder() {} -OH_NN_ReturnCode ReduceMaxBuilder::SetCoeff(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMaxBuilder::SetCoeff(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[ReduceMax] The coeff should be type OH_NN_FLOAT32."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode ReduceMaxBuilder::SetCoeff(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceMaxBuilder::SetReduceToEnd(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMaxBuilder::SetReduceToEnd(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceMax] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); @@ -76,7 +76,7 @@ OH_NN_ReturnCode ReduceMaxBuilder::SetReduceToEnd(std::shared_ptr tens return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceMaxBuilder::SetKeepDims(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMaxBuilder::SetKeepDims(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceMax] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); @@ -126,19 +126,11 @@ OH_NN_ReturnCode ReduceMaxBuilder::Build(const std::vector& paramsInde for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_REDUCE_MAX_KEEP_DIMS: - returnCode = SetKeepDims(tensor); - break; - case OH_NN_REDUCE_MAX_REDUCE_TO_END: - returnCode = SetReduceToEnd(tensor); - break; - case OH_NN_REDUCE_MAX_COEFF: - returnCode = SetCoeff(tensor); - break; - default: - LOGE("[ReduceMax] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ReduceMax] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/reducemax_builder.h b/frameworks/native/neural_network_runtime/ops/reducemax_builder.h index 08f469b60fa7f886c276a216b3da35b89cd5ba58..8ba6b11d63df50fac79ef5ce5a63e1597d460625 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemax_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reducemax_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceMaxBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (ReduceMaxBuilder::*FuncPtr)(const std::shared_ptr&); + ReduceMaxBuilder(); ~ReduceMaxBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -33,14 +35,19 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); - OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); - OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + OH_NN_ReturnCode SetCoeff(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetReduceToEnd(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetKeepDims(const std::shared_ptr& tensor); private: float m_coeff {0.0f}; bool m_reduceToEnd {false}; bool m_keepDims {false}; + std::unordered_map m_paramMap = { + {OH_NN_REDUCE_MAX_COEFF, &ReduceMaxBuilder::SetCoeff}, + {OH_NN_REDUCE_MAX_REDUCE_TO_END, &ReduceMaxBuilder::SetReduceToEnd}, + {OH_NN_REDUCE_MAX_KEEP_DIMS, &ReduceMaxBuilder::SetKeepDims} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reducemean_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/reducemean_builder.cpp index 8f821cef949188c4054912024501d6e5c614dd25..c208d1c9ee4a081f4d29616e7319c412cbe564ed 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemean_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reducemean_builder.cpp @@ -31,7 +31,7 @@ ReduceMeanBuilder::ReduceMeanBuilder() {} ReduceMeanBuilder:: ~ReduceMeanBuilder() {} -OH_NN_ReturnCode ReduceMeanBuilder::SetCoeff(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMeanBuilder::SetCoeff(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[ReduceAll] The coeff should be type OH_NN_FLOAT32."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode ReduceMeanBuilder::SetCoeff(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceMeanBuilder::SetReduceToEnd(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMeanBuilder::SetReduceToEnd(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceAll] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); @@ -76,7 +76,7 @@ OH_NN_ReturnCode ReduceMeanBuilder::SetReduceToEnd(std::shared_ptr ten return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceMeanBuilder::SetKeepDims(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMeanBuilder::SetKeepDims(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -126,19 +126,11 @@ OH_NN_ReturnCode ReduceMeanBuilder::Build(const std::vector& paramsInd for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_REDUCE_MEAN_KEEP_DIMS: - returnCode = SetKeepDims(tensor); - break; - case OH_NN_REDUCE_MEAN_REDUCE_TO_END: - returnCode = SetReduceToEnd(tensor); - break; - case OH_NN_REDUCE_MEAN_COEFF: - returnCode = SetCoeff(tensor); - break; - default: - LOGE("[ReduceMean] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ReduceMean] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/reducemean_builder.h b/frameworks/native/neural_network_runtime/ops/reducemean_builder.h index 64af503e01763ae563e4dd295fd2922c15fb2a82..9d24a7432786a3e7f932a7bdbccd30bc9f8c9f9a 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemean_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reducemean_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceMeanBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (ReduceMeanBuilder::*FuncPtr)(const std::shared_ptr&); + ReduceMeanBuilder(); ~ReduceMeanBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -33,14 +35,19 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); - OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); - OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + OH_NN_ReturnCode SetCoeff(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetReduceToEnd(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetKeepDims(const std::shared_ptr& tensor); private: bool m_keepDims{false}; float m_coeff {0.0f}; bool m_reduceToEnd {false}; + std::unordered_map m_paramMap = { + {OH_NN_REDUCE_MEAN_COEFF, &ReduceMeanBuilder::SetCoeff}, + {OH_NN_REDUCE_MEAN_REDUCE_TO_END, &ReduceMeanBuilder::SetReduceToEnd}, + {OH_NN_REDUCE_MEAN_KEEP_DIMS, &ReduceMeanBuilder::SetKeepDims} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reducemin_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/reducemin_builder.cpp index 9cf7fed4a41196938eafcf793737baaa5afd9e8f..65a9229c74bd1e4f2655da55778f950a08f90000 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemin_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reducemin_builder.cpp @@ -31,7 +31,7 @@ ReduceMinBuilder::ReduceMinBuilder() {} ReduceMinBuilder::~ReduceMinBuilder() {} -OH_NN_ReturnCode ReduceMinBuilder::SetCoeff(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMinBuilder::SetCoeff(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[ReduceMin] The coeff should be type OH_NN_FLOAT32."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode ReduceMinBuilder::SetCoeff(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceMinBuilder::SetReduceToEnd(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMinBuilder::SetReduceToEnd(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceMin] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); @@ -76,7 +76,7 @@ OH_NN_ReturnCode ReduceMinBuilder::SetReduceToEnd(std::shared_ptr tens return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceMinBuilder::SetKeepDims(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceMinBuilder::SetKeepDims(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceMin] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); @@ -126,19 +126,11 @@ OH_NN_ReturnCode ReduceMinBuilder::Build(const std::vector& paramsInde for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_REDUCE_MIN_KEEP_DIMS: - returnCode = SetKeepDims(tensor); - break; - case OH_NN_REDUCE_MIN_REDUCE_TO_END: - returnCode = SetReduceToEnd(tensor); - break; - case OH_NN_REDUCE_MIN_COEFF: - returnCode = SetCoeff(tensor); - break; - default: - LOGE("[ReduceMin] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ReduceMin] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/reducemin_builder.h b/frameworks/native/neural_network_runtime/ops/reducemin_builder.h index 01e1e9b51b2ade7dd806df55ca5cf0772a7983aa..d55b30a0b92ef9fffb1c3c7fcd0419b70611a740 100644 --- a/frameworks/native/neural_network_runtime/ops/reducemin_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reducemin_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceMinBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (ReduceMinBuilder::*FuncPtr)(const std::shared_ptr&); + ReduceMinBuilder(); ~ReduceMinBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -33,14 +35,19 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); - OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); - OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + OH_NN_ReturnCode SetCoeff(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetReduceToEnd(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetKeepDims(const std::shared_ptr& tensor); private: float m_coeff {0.0f}; bool m_reduceToEnd {false}; bool m_keepDims {false}; + std::unordered_map m_paramMap = { + {OH_NN_REDUCE_MIN_COEFF, &ReduceMinBuilder::SetCoeff}, + {OH_NN_REDUCE_MIN_REDUCE_TO_END, &ReduceMinBuilder::SetReduceToEnd}, + {OH_NN_REDUCE_MIN_KEEP_DIMS, &ReduceMinBuilder::SetKeepDims} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reduceprod_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/reduceprod_builder.cpp index cfcee8cd47aa52c44f695756164eea1c6a13c7af..ba8cadd905163356a979118cd9c8a8988ccffd8f 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceprod_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reduceprod_builder.cpp @@ -31,7 +31,7 @@ ReduceProdBuilder::ReduceProdBuilder() {} ReduceProdBuilder:: ~ReduceProdBuilder() {} -OH_NN_ReturnCode ReduceProdBuilder::SetCoeff(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceProdBuilder::SetCoeff(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[ReduceAll] The coeff should be type OH_NN_FLOAT32."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode ReduceProdBuilder::SetCoeff(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceProdBuilder::SetReduceToEnd(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceProdBuilder::SetReduceToEnd(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceAll] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); @@ -76,7 +76,7 @@ OH_NN_ReturnCode ReduceProdBuilder::SetReduceToEnd(std::shared_ptr ten return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceProdBuilder::SetKeepDims(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceProdBuilder::SetKeepDims(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -126,19 +126,11 @@ OH_NN_ReturnCode ReduceProdBuilder::Build(const std::vector& paramsInd for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_REDUCE_PROD_KEEP_DIMS: - returnCode = SetKeepDims(tensor); - break; - case OH_NN_REDUCE_PROD_REDUCE_TO_END: - returnCode = SetReduceToEnd(tensor); - break; - case OH_NN_REDUCE_PROD_COEFF: - returnCode = SetCoeff(tensor); - break; - default: - LOGE("[ReduceProd] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ReduceProd] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/reduceprod_builder.h b/frameworks/native/neural_network_runtime/ops/reduceprod_builder.h index 7ca13c123cc7d05086da64dc2a6332b1b39bcab9..9d520f8c902e92987fe72c7aab9fdd1be3cd61e4 100644 --- a/frameworks/native/neural_network_runtime/ops/reduceprod_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reduceprod_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceProdBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (ReduceProdBuilder::*FuncPtr)(const std::shared_ptr&); + ReduceProdBuilder(); ~ReduceProdBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -33,14 +35,19 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); - OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); - OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + OH_NN_ReturnCode SetCoeff(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetReduceToEnd(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetKeepDims(const std::shared_ptr& tensor); private: bool m_keepDims{false}; float m_coeff {0.0f}; bool m_reduceToEnd {false}; + std::unordered_map m_paramMap = { + {OH_NN_REDUCE_PROD_COEFF, &ReduceProdBuilder::SetCoeff}, + {OH_NN_REDUCE_PROD_REDUCE_TO_END, &ReduceProdBuilder::SetReduceToEnd}, + {OH_NN_REDUCE_PROD_KEEP_DIMS, &ReduceProdBuilder::SetKeepDims} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/reducesum_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/reducesum_builder.cpp index 18e18050158d84084a7c983cbcc3b251cb980146..d00c897ceb4f22471bc9dfe5beceacc606fefea4 100644 --- a/frameworks/native/neural_network_runtime/ops/reducesum_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/reducesum_builder.cpp @@ -31,7 +31,7 @@ ReduceSumBuilder::ReduceSumBuilder() {} ReduceSumBuilder::~ReduceSumBuilder() {} -OH_NN_ReturnCode ReduceSumBuilder::SetCoeff(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceSumBuilder::SetCoeff(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_FLOAT32) { LOGE("[ReduceSum] The coeff should be type OH_NN_FLOAT32."); @@ -53,7 +53,7 @@ OH_NN_ReturnCode ReduceSumBuilder::SetCoeff(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceSumBuilder::SetReduceToEnd(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceSumBuilder::SetReduceToEnd(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceSum] SetReduceToEnd failed, the reduceToEnd should be type OH_NN_BOOL."); @@ -76,7 +76,7 @@ OH_NN_ReturnCode ReduceSumBuilder::SetReduceToEnd(std::shared_ptr tens return OH_NN_SUCCESS; } -OH_NN_ReturnCode ReduceSumBuilder::SetKeepDims(std::shared_ptr tensor) +OH_NN_ReturnCode ReduceSumBuilder::SetKeepDims(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[ReduceSum] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); @@ -126,19 +126,11 @@ OH_NN_ReturnCode ReduceSumBuilder::Build(const std::vector& paramsInde for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_REDUCE_SUM_KEEP_DIMS: - returnCode = SetKeepDims(tensor); - break; - case OH_NN_REDUCE_SUM_REDUCE_TO_END: - returnCode = SetReduceToEnd(tensor); - break; - case OH_NN_REDUCE_SUM_COEFF: - returnCode = SetCoeff(tensor); - break; - default: - LOGE("[ReduceSum] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ReduceSum] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/reducesum_builder.h b/frameworks/native/neural_network_runtime/ops/reducesum_builder.h index 778f9cb96b0ad1139cd2d1af9335e28c7f1d4e96..7b1fbf8208229b6a9c9fb312bdb4da924fd5a471 100644 --- a/frameworks/native/neural_network_runtime/ops/reducesum_builder.h +++ b/frameworks/native/neural_network_runtime/ops/reducesum_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ReduceSumBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (ReduceSumBuilder::*FuncPtr)(const std::shared_ptr&); + ReduceSumBuilder(); ~ReduceSumBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -33,14 +35,19 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetCoeff(std::shared_ptr tensor); - OH_NN_ReturnCode SetReduceToEnd(std::shared_ptr tensor); - OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + OH_NN_ReturnCode SetCoeff(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetReduceToEnd(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetKeepDims(const std::shared_ptr& tensor); private: float m_coeff {0.0f}; bool m_reduceToEnd {false}; bool m_keepDims {false}; + std::unordered_map m_paramMap = { + {OH_NN_REDUCE_SUM_COEFF, &ReduceSumBuilder::SetCoeff}, + {OH_NN_REDUCE_SUM_REDUCE_TO_END, &ReduceSumBuilder::SetReduceToEnd}, + {OH_NN_REDUCE_SUM_KEEP_DIMS, &ReduceSumBuilder::SetKeepDims} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.cpp index 137a62e5a7d82462cdd6be3919f4a08d064ca6c4..dadc50d10a36a1c67aff7e9d2cca7a468fce74a1 100644 --- a/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.cpp @@ -30,7 +30,7 @@ ResizeBilinearBuilder::ResizeBilinearBuilder() {} ResizeBilinearBuilder::~ResizeBilinearBuilder() {} -OH_NN_ReturnCode ResizeBilinearBuilder::SetNewHeight(std::shared_ptr tensor) +OH_NN_ReturnCode ResizeBilinearBuilder::SetNewHeight(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -53,7 +53,7 @@ OH_NN_ReturnCode ResizeBilinearBuilder::SetNewHeight(std::shared_ptr t return OH_NN_SUCCESS; } -OH_NN_ReturnCode ResizeBilinearBuilder::SetNewWidth(std::shared_ptr tensor) +OH_NN_ReturnCode ResizeBilinearBuilder::SetNewWidth(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -76,7 +76,7 @@ OH_NN_ReturnCode ResizeBilinearBuilder::SetNewWidth(std::shared_ptr te return OH_NN_SUCCESS; } -OH_NN_ReturnCode ResizeBilinearBuilder::SetPreserveAspectRatio(std::shared_ptr tensor) +OH_NN_ReturnCode ResizeBilinearBuilder::SetPreserveAspectRatio(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -99,7 +99,7 @@ OH_NN_ReturnCode ResizeBilinearBuilder::SetPreserveAspectRatio(std::shared_ptr tensor) +OH_NN_ReturnCode ResizeBilinearBuilder::SetCoordinateTransformMode(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -125,7 +125,7 @@ OH_NN_ReturnCode ResizeBilinearBuilder::SetCoordinateTransformMode(std::shared_p return OH_NN_SUCCESS; } -OH_NN_ReturnCode ResizeBilinearBuilder::SetExcludeOutside(std::shared_ptr tensor) +OH_NN_ReturnCode ResizeBilinearBuilder::SetExcludeOutside(const 
std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetElementCount() != SCALE_LENGTH) { @@ -175,25 +175,11 @@ OH_NN_ReturnCode ResizeBilinearBuilder::Build(const std::vector& param for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_RESIZE_BILINEAR_NEW_HEIGHT: - returnCode = SetNewHeight(tensor); - break; - case OH_NN_RESIZE_BILINEAR_NEW_WIDTH: - returnCode = SetNewWidth(tensor); - break; - case OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO: - returnCode = SetPreserveAspectRatio(tensor); - break; - case OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE: - returnCode = SetCoordinateTransformMode(tensor); - break; - case OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE: - returnCode = SetExcludeOutside(tensor); - break; - default: - LOGE("[ResizeBilinear] Build failed, parameter type is invalid. type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ResizeBilinear] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.h b/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.h index 6503e771bc95f0359948aa61df90f82bf7a2df35..df495abd8ea3f48885491484cf822d976963a5f2 100644 --- a/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.h +++ b/frameworks/native/neural_network_runtime/ops/resize_bilinear_builder.h @@ -25,6 +25,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ResizeBilinearBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (ResizeBilinearBuilder::*FuncPtr)(const std::shared_ptr&); + ResizeBilinearBuilder(); ~ResizeBilinearBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -35,11 +37,11 @@ public: 
LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetNewHeight(std::shared_ptr tensor); - OH_NN_ReturnCode SetNewWidth(std::shared_ptr tensor); - OH_NN_ReturnCode SetPreserveAspectRatio(std::shared_ptr tensor); - OH_NN_ReturnCode SetCoordinateTransformMode(std::shared_ptr tensor); - OH_NN_ReturnCode SetExcludeOutside(std::shared_ptr tensor); + OH_NN_ReturnCode SetNewHeight(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetNewWidth(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetPreserveAspectRatio(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetCoordinateTransformMode(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetExcludeOutside(const std::shared_ptr& tensor); private: mindspore::lite::ResizeMethod m_method {mindspore::lite::RESIZE_METHOD_LINEAR}; @@ -49,6 +51,13 @@ private: mindspore::lite::CoordinateTransformMode m_coordinateTransformMode { mindspore::lite::COORDINATE_TRANSFORM_MODE_ASYMMETRIC}; uint64_t m_excludeOutside{0}; + std::unordered_map m_paramMap = { + {OH_NN_RESIZE_BILINEAR_NEW_HEIGHT, &ResizeBilinearBuilder::SetNewHeight}, + {OH_NN_RESIZE_BILINEAR_NEW_WIDTH, &ResizeBilinearBuilder::SetNewWidth}, + {OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO, &ResizeBilinearBuilder::SetPreserveAspectRatio}, + {OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE, &ResizeBilinearBuilder::SetCoordinateTransformMode}, + {OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE, &ResizeBilinearBuilder::SetExcludeOutside} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/scale_builder.cpp b/frameworks/native/neural_network_runtime/ops/scale_builder.cpp index 9c2256ac0efa76031fa4620cfadaa72660f5122d..86085ef7ed2d2bd02d27e3d952d5613fe286dea4 100644 --- a/frameworks/native/neural_network_runtime/ops/scale_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/scale_builder.cpp @@ -32,7 +32,7 @@ ScaleBuilder::ScaleBuilder() {} ScaleBuilder::~ScaleBuilder() {} -OH_NN_ReturnCode 
ScaleBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode ScaleBuilder::SetAxis(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT64) { @@ -55,7 +55,7 @@ OH_NN_ReturnCode ScaleBuilder::SetAxis(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode ScaleBuilder::SetActivationType(std::shared_ptr tensor) +OH_NN_ReturnCode ScaleBuilder::SetActivationType(const std::shared_ptr& tensor) { tensor->IdentifyOpParameter(); if (tensor->GetDataType() != OH_NN_INT8) { @@ -112,16 +112,11 @@ OH_NN_ReturnCode ScaleBuilder::Build(const std::vector& paramsIndex, for (uint32_t i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; - switch (tensor->GetType()) { - case OH_NN_SCALE_AXIS: - returnCode = SetAxis(tensor); - break; - case OH_NN_SCALE_ACTIVATIONTYPE: - returnCode = SetActivationType(tensor); - break; - default: - LOGE("[ResizeBilinear] Build failed, parameter type is invalid. type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[ScaleBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/scale_builder.h b/frameworks/native/neural_network_runtime/ops/scale_builder.h index d9b011ed985f48ee30c0d5bbb26a26dca5d2c900..7a0d5529940cc00654b49294cc41e47f69600098 100644 --- a/frameworks/native/neural_network_runtime/ops/scale_builder.h +++ b/frameworks/native/neural_network_runtime/ops/scale_builder.h @@ -25,6 +25,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class ScaleBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (ScaleBuilder::*FuncPtr)(const std::shared_ptr&); + ScaleBuilder(); ~ScaleBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -35,12 +37,16 @@ public: 
LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); - OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetActivationType(const std::shared_ptr& tensor); private: mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; const uint64_t* m_axis{nullptr}; + std::unordered_map m_paramMap = { + {OH_NN_SCALE_ACTIVATIONTYPE, &ScaleBuilder::SetActivationType}, + {OH_NN_SCALE_AXIS, &ScaleBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/slice_builder.cpp b/frameworks/native/neural_network_runtime/ops/slice_builder.cpp index 4ab1d947de1f12abadf3cc6ec9f75b2afd22e40c..826c81357d53bcd8a921a75d4270bd66c69cbde4 100644 --- a/frameworks/native/neural_network_runtime/ops/slice_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/slice_builder.cpp @@ -29,7 +29,7 @@ SliceBuilder::SliceBuilder() {} SliceBuilder::~SliceBuilder() {} -OH_NN_ReturnCode SliceBuilder::SetAxes(std::shared_ptr tensor) +OH_NN_ReturnCode SliceBuilder::SetAxes(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[SliceBuilder] The axes should be type OH_NN_INT64."); @@ -85,14 +85,13 @@ OH_NN_ReturnCode SliceBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_SLICE_AXES: - returnCode = SetAxes(tensor); - break; - default: - LOGE("[SliceBuilder] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[SliceBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return 
OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[SliceBuilder] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/slice_builder.h b/frameworks/native/neural_network_runtime/ops/slice_builder.h index f6f5ee6898646390a85ba3c3d996b6a1b1a7f978..0cbebf33f5ade36b4997921e74d0363ce002bd11 100644 --- a/frameworks/native/neural_network_runtime/ops/slice_builder.h +++ b/frameworks/native/neural_network_runtime/ops/slice_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class SliceBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (SliceBuilder::*FuncPtr)(const std::shared_ptr&); + SliceBuilder(); ~SliceBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -34,10 +36,13 @@ public: LiteGraphTensorPtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxes(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxes(const std::shared_ptr& tensor); private: std::vector m_axes; + std::unordered_map m_paramMap = { + {OH_NN_SLICE_AXES, &SliceBuilder::SetAxes} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/softmax_builder.cpp b/frameworks/native/neural_network_runtime/ops/softmax_builder.cpp index 6b64c66e08473497c9ea5a70afab77500df3d3ab..be45b080d2e4a7b6b7e058afca3f905542268387 100644 --- a/frameworks/native/neural_network_runtime/ops/softmax_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/softmax_builder.cpp @@ -29,7 +29,7 @@ SoftmaxBuilder::SoftmaxBuilder() {} SoftmaxBuilder::~SoftmaxBuilder() {} -OH_NN_ReturnCode SoftmaxBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode SoftmaxBuilder::SetAxis(const std::shared_ptr& tensor) { // Set Axis if (tensor->GetDataType() != OH_NN_INT64) { @@ -82,13 +82,11 @@ OH_NN_ReturnCode SoftmaxBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; 
tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_SOFTMAX_AXIS: - returnCode = SetAxis(tensor); - break; - default: - LOGE("[SoftmaxBuilder] Parameter Type is invalid. type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[SoftmaxBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/softmax_builder.h b/frameworks/native/neural_network_runtime/ops/softmax_builder.h index 2b83133cb069a66f2478d6d2e5b2dfd511266b4f..a0607e3acc38c826958d31148fd1cbd8481c39fc 100644 --- a/frameworks/native/neural_network_runtime/ops/softmax_builder.h +++ b/frameworks/native/neural_network_runtime/ops/softmax_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class SoftmaxBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (SoftmaxBuilder::*FuncPtr)(const std::shared_ptr&); + SoftmaxBuilder(); ~SoftmaxBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -34,10 +36,13 @@ public: LiteGraphTensorPtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); private: std::vector m_axis; + std::unordered_map m_paramMap = { + {OH_NN_SOFTMAX_AXIS, &SoftmaxBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.cpp b/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.cpp index 6fd66351eba4027cca69c0d477e45bd6686492dd..9c949991678c63b4fbc019c37b7043c8e9228d1f 100644 --- a/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.cpp +++ 
b/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.cpp @@ -35,7 +35,7 @@ SpaceToBatchNDBuilder::SpaceToBatchNDBuilder() {} SpaceToBatchNDBuilder::~SpaceToBatchNDBuilder() {} -OH_NN_ReturnCode SpaceToBatchNDBuilder::SetBlockShape(std::shared_ptr tensor) +OH_NN_ReturnCode SpaceToBatchNDBuilder::SetBlockShape(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[SpaceToBatchNDBuilder] The 2nd input blockShape should be type OH_NN_INT64."); @@ -68,7 +68,7 @@ OH_NN_ReturnCode SpaceToBatchNDBuilder::SetBlockShape(std::shared_ptr return OH_NN_SUCCESS; } -OH_NN_ReturnCode SpaceToBatchNDBuilder::SetPaddings(std::shared_ptr tensor) +OH_NN_ReturnCode SpaceToBatchNDBuilder::SetPaddings(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[SpaceToBatchNDBuilder] The 3rd input paddings should be type OH_NN_INT64."); @@ -128,16 +128,11 @@ OH_NN_ReturnCode SpaceToBatchNDBuilder::Build(const std::vector& param for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE: - returnCode = SetBlockShape(tensor); - break; - case OH_NN_SPACE_TO_BATCH_ND_PADDINGS: - returnCode = SetPaddings(tensor); - break; - default: - LOGE("[SpaceToBatchNDBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[SpaceToBatchNDBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { @@ -154,7 +149,7 @@ OH_NN_ReturnCode SpaceToBatchNDBuilder::Build(const std::vector& param return OH_NN_SUCCESS; } -OH_NN_ReturnCode SpaceToBatchNDBuilder::SetPadData(std::shared_ptr tensor) +OH_NN_ReturnCode SpaceToBatchNDBuilder::SetPadData(const std::shared_ptr& tensor) { paddings.clear(); diff --git a/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.h b/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.h index 4f0d067778ca38f701f2b44bfe4b9da7fd0fd6c0..d4de195259236eea89e3f70188baa0a1ea7038f9 100644 --- a/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.h +++ b/frameworks/native/neural_network_runtime/ops/space_to_batch_nd_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class SpaceToBatchNDBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (SpaceToBatchNDBuilder::*FuncPtr)(const std::shared_ptr&); + SpaceToBatchNDBuilder(); ~SpaceToBatchNDBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -34,13 +36,17 @@ public: LiteGraphTensorPtr GetPrimitive() override; private: - OH_NN_ReturnCode SetPadData(std::shared_ptr tensor); - OH_NN_ReturnCode SetBlockShape(std::shared_ptr tensor); - OH_NN_ReturnCode SetPaddings(std::shared_ptr tensor); + OH_NN_ReturnCode SetPadData(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetBlockShape(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetPaddings(const std::shared_ptr& tensor); private: std::vector> paddings; std::vector block_shape {}; + std::unordered_map m_paramMap = { + {OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE, 
&SpaceToBatchNDBuilder::SetBlockShape}, + {OH_NN_SPACE_TO_BATCH_ND_PADDINGS, &SpaceToBatchNDBuilder::SetPaddings} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.cpp b/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.cpp index 3829df78d48fa781dbd730cdb7046e8627cc1597..c1c28c6ecbe826aa660acbea77de87c82091ece2 100644 --- a/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.cpp @@ -33,7 +33,7 @@ SpaceToDepthBuilder::SpaceToDepthBuilder() {} SpaceToDepthBuilder::~SpaceToDepthBuilder() {} -OH_NN_ReturnCode SpaceToDepthBuilder::SetBlockSize(std::shared_ptr tensor) +OH_NN_ReturnCode SpaceToDepthBuilder::SetBlockSize(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[SpaceToDepth] The blockSize should be type OH_NN_INT64."); @@ -83,13 +83,11 @@ OH_NN_ReturnCode SpaceToDepthBuilder::Build(const std::vector& paramsI for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE: - ret = SetBlockSize(tensor); - break; - default: - LOGE("[SpaceToDepth] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + ret = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[SpaceToDepth] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (ret != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.h b/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.h index 425347f9ae094dc7cded1675d7dfd61cfb7ffe1e..370a4a6fdb0edfe75b257b843b3155afee9bf8fc 100644 --- 
a/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.h +++ b/frameworks/native/neural_network_runtime/ops/space_to_depth_builder.h @@ -23,6 +23,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class SpaceToDepthBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (SpaceToDepthBuilder::*FuncPtr)(const std::shared_ptr&); + SpaceToDepthBuilder(); ~SpaceToDepthBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -33,10 +35,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetBlockSize(std::shared_ptr tensor); + OH_NN_ReturnCode SetBlockSize(const std::shared_ptr& tensor); private: int64_t m_blockSize {0}; + std::unordered_map m_paramMap = { + {OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE, &SpaceToDepthBuilder::SetBlockSize} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/split_builder.cpp b/frameworks/native/neural_network_runtime/ops/split_builder.cpp index ad6091cfff56167fa15873e9ed5a9288fa6731dc..a7880094822124138f978ac072e4809cf65a8fea 100644 --- a/frameworks/native/neural_network_runtime/ops/split_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/split_builder.cpp @@ -61,7 +61,7 @@ OH_NN_ReturnCode SplitBuilder::SetInputAndOutput(const std::vector &in return OH_NN_SUCCESS; } -OH_NN_ReturnCode SplitBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode SplitBuilder::SetAxis(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[SplitBuilder] The 4th input axis should be type OH_NN_INT64."); @@ -83,7 +83,7 @@ OH_NN_ReturnCode SplitBuilder::SetAxis(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode SplitBuilder::SetOutputNum(std::shared_ptr tensor) +OH_NN_ReturnCode SplitBuilder::SetOutputNum(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[SplitBuilder] The 2nd input outputNum should be type OH_NN_INT64."); @@ -100,7 
+100,7 @@ OH_NN_ReturnCode SplitBuilder::SetOutputNum(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode SplitBuilder::SetSizeSplits(std::shared_ptr tensor) +OH_NN_ReturnCode SplitBuilder::SetSizeSplits(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[SplitBuilder] The 3rd input sizeSplit should be type OH_NN_INT64."); @@ -146,19 +146,11 @@ OH_NN_ReturnCode SplitBuilder::Build(const std::vector ¶msIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_SPLIT_AXIS: - returnCode = SetAxis(tensor); - break; - case OH_NN_SPLIT_OUTPUT_NUM: - returnCode = SetOutputNum(tensor); - break; - case OH_NN_SPLIT_SIZE_SPLITS: - returnCode = SetSizeSplits(tensor); - break; - default: - LOGE("[SplitBuilder] Parameter Type is invalid. type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[SplitBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/split_builder.h b/frameworks/native/neural_network_runtime/ops/split_builder.h index 3ebee8de6fad50340e30a8001c62969ddc48e212..6453b3d782eec27f2cb54ca76e5bc887c0acda2c 100644 --- a/frameworks/native/neural_network_runtime/ops/split_builder.h +++ b/frameworks/native/neural_network_runtime/ops/split_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class SplitBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (SplitBuilder::*FuncPtr)(const std::shared_ptr&); + SplitBuilder(); ~SplitBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -39,14 +41,19 @@ private: OH_NN_ReturnCode SetInputAndOutput(const std::vector& inputsIndex, const 
std::vector& outputsIndex, const std::vector>& allTensors); - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); - OH_NN_ReturnCode SetOutputNum(std::shared_ptr tensor); - OH_NN_ReturnCode SetSizeSplits(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetOutputNum(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetSizeSplits(const std::shared_ptr& tensor); private: int64_t m_output_num {0}; std::vector m_size_splits; int64_t m_axis {0}; + std::unordered_map m_paramMap = { + {OH_NN_SPLIT_AXIS, &SplitBuilder::SetAxis}, + {OH_NN_SPLIT_OUTPUT_NUM, &SplitBuilder::SetOutputNum}, + {OH_NN_SPLIT_SIZE_SPLITS, &SplitBuilder::SetSizeSplits} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/squeeze_builder.cpp b/frameworks/native/neural_network_runtime/ops/squeeze_builder.cpp index 61604669e90784f0715895805811d974757cef35..18d63fbdcc344e9039ac31bee1b01276094894d8 100644 --- a/frameworks/native/neural_network_runtime/ops/squeeze_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/squeeze_builder.cpp @@ -29,7 +29,7 @@ SqueezeBuilder::SqueezeBuilder() {} SqueezeBuilder::~SqueezeBuilder() {} -OH_NN_ReturnCode SqueezeBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode SqueezeBuilder::SetAxis(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[SqueezeBuilder] The 2nd input axis should be type OH_NN_INT64."); @@ -86,13 +86,11 @@ OH_NN_ReturnCode SqueezeBuilder::Build(const std::vector ¶msIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_SQUEEZE_AXIS: - returnCode = SetAxis(tensor); - break; - default: - LOGE("[SqueezeBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[SqueezeBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/squeeze_builder.h b/frameworks/native/neural_network_runtime/ops/squeeze_builder.h index f02ed3819c45662837804649afa952555e67fc9b..a611184b404130019aaeac3e1d3640281334d9f4 100644 --- a/frameworks/native/neural_network_runtime/ops/squeeze_builder.h +++ b/frameworks/native/neural_network_runtime/ops/squeeze_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class SqueezeBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (SqueezeBuilder::*FuncPtr)(const std::shared_ptr&); + SqueezeBuilder(); ~SqueezeBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -34,10 +36,13 @@ public: LiteGraphTensorPtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); private: std::vector m_axis; + std::unordered_map m_paramMap = { + {OH_NN_SQUEEZE_AXIS, &SqueezeBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/stack_builder.cpp b/frameworks/native/neural_network_runtime/ops/stack_builder.cpp index f3aa2b3924e06aaf98954e46b095c63092977f0a..fc95848ae9561d39dc89b55b3efed25bab6f57ee 100644 --- a/frameworks/native/neural_network_runtime/ops/stack_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/stack_builder.cpp @@ -29,7 +29,7 @@ StackBuilder::StackBuilder() {} StackBuilder::~StackBuilder() {} -OH_NN_ReturnCode StackBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode StackBuilder::SetAxis(const std::shared_ptr& tensor) { 
if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[StackBuilder] The last input axis should be type OH_NN_INT64."); @@ -88,13 +88,11 @@ OH_NN_ReturnCode StackBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_STACK_AXIS: - returnCode = SetAxis(tensor); - break; - default: - LOGE("[StackBuilder] Parameter Type is invalid. type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[StackBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/stack_builder.h b/frameworks/native/neural_network_runtime/ops/stack_builder.h index 665fb1695ba15fc7ec9a1f306bf530cab7688903..512ddb0a67a3664e7d918a9e641be3715e773998 100644 --- a/frameworks/native/neural_network_runtime/ops/stack_builder.h +++ b/frameworks/native/neural_network_runtime/ops/stack_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class StackBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (StackBuilder::*FuncPtr)(const std::shared_ptr&); + StackBuilder(); ~StackBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -34,10 +36,13 @@ public: LiteGraphTensorPtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); private: int64_t m_axis = {0}; + std::unordered_map m_paramMap = { + {OH_NN_STACK_AXIS, &StackBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/strided_slice_builder.cpp 
b/frameworks/native/neural_network_runtime/ops/strided_slice_builder.cpp index bc537549bb6cede454e146271895662c3962870d..93a1bbf8355a9094e8c017b611097b34f72f9213 100644 --- a/frameworks/native/neural_network_runtime/ops/strided_slice_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/strided_slice_builder.cpp @@ -47,7 +47,7 @@ OH_NN_ReturnCode StridedSliceBuilder::SetInputOutput(const std::vector return OH_NN_SUCCESS; } -OH_NN_ReturnCode StridedSliceBuilder::SetBeginMask(std::shared_ptr tensor) +OH_NN_ReturnCode StridedSliceBuilder::SetBeginMask(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[StridedSliceBuilder] The 5th input beginMask should be type HNN_INT64."); @@ -64,7 +64,7 @@ OH_NN_ReturnCode StridedSliceBuilder::SetBeginMask(std::shared_ptr ten return OH_NN_SUCCESS; } -OH_NN_ReturnCode StridedSliceBuilder::SetEndMask(std::shared_ptr tensor) +OH_NN_ReturnCode StridedSliceBuilder::SetEndMask(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[StridedSliceBuilder] The 6th input endMask should be type HNN_INT64."); @@ -81,7 +81,7 @@ OH_NN_ReturnCode StridedSliceBuilder::SetEndMask(std::shared_ptr tenso return OH_NN_SUCCESS; } -OH_NN_ReturnCode StridedSliceBuilder::SetEllipsisMask(std::shared_ptr tensor) +OH_NN_ReturnCode StridedSliceBuilder::SetEllipsisMask(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[StridedSliceBuilder] The 7th input ellipsisMask should be type HNN_INT64."); @@ -98,7 +98,7 @@ OH_NN_ReturnCode StridedSliceBuilder::SetEllipsisMask(std::shared_ptr return OH_NN_SUCCESS; } -OH_NN_ReturnCode StridedSliceBuilder::SetNewAxisMask(std::shared_ptr tensor) +OH_NN_ReturnCode StridedSliceBuilder::SetNewAxisMask(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[StridedSliceBuilder] The 8th input newAxisMask should be type HNN_INT64."); @@ -115,7 +115,7 @@ OH_NN_ReturnCode 
StridedSliceBuilder::SetNewAxisMask(std::shared_ptr t return OH_NN_SUCCESS; } -OH_NN_ReturnCode StridedSliceBuilder::SetShrinkAxisMask(std::shared_ptr tensor) +OH_NN_ReturnCode StridedSliceBuilder::SetShrinkAxisMask(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[StridedSliceBuilder] The 9th input shrinkAxisMAsk should be type HNN_INT64."); @@ -163,25 +163,11 @@ OH_NN_ReturnCode StridedSliceBuilder::Build(const std::vector& paramsI for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_STRIDED_SLICE_BEGIN_MASK: - returnCode = SetBeginMask(tensor); - break; - case OH_NN_STRIDED_SLICE_END_MASK: - returnCode = SetEndMask(tensor); - break; - case OH_NN_STRIDED_SLICE_ELLIPSIS_MASK: - returnCode = SetEllipsisMask(tensor); - break; - case OH_NN_STRIDED_SLICE_NEW_AXIS_MASK: - returnCode = SetNewAxisMask(tensor); - break; - case OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK: - returnCode = SetShrinkAxisMask(tensor); - break; - default: - LOGE("[StridedSliceBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[StridedSliceBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/strided_slice_builder.h b/frameworks/native/neural_network_runtime/ops/strided_slice_builder.h index 06ccb65793c602f22c08db2bb0ed01dde2d4b21f..bd4787d6058f45a0e47b3ec9859c4ffbc7ddd285 100644 --- a/frameworks/native/neural_network_runtime/ops/strided_slice_builder.h +++ b/frameworks/native/neural_network_runtime/ops/strided_slice_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class StridedSliceBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (StridedSliceBuilder::*FuncPtr)(const std::shared_ptr&); + StridedSliceBuilder(); ~StridedSliceBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -37,11 +39,11 @@ private: OH_NN_ReturnCode SetInputOutput(const std::vector& inputsIndex, const std::vector& outputsIndex, const std::vector>& allTensors); - OH_NN_ReturnCode SetBeginMask(std::shared_ptr tensor); - OH_NN_ReturnCode SetEndMask(std::shared_ptr tensor); - OH_NN_ReturnCode SetEllipsisMask(std::shared_ptr tensor); - OH_NN_ReturnCode SetNewAxisMask(std::shared_ptr tensor); - OH_NN_ReturnCode SetShrinkAxisMask(std::shared_ptr tensor); + OH_NN_ReturnCode SetBeginMask(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetEndMask(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetEllipsisMask(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetNewAxisMask(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetShrinkAxisMask(const std::shared_ptr& tensor); private: int64_t m_begin_mask = {0}; @@ -49,6 +51,13 @@ private: int64_t m_ellipsis_mask = {0}; int64_t m_new_axis_mask = {0}; int64_t 
m_shrink_axis_mask = {0}; + std::unordered_map m_paramMap = { + {OH_NN_STRIDED_SLICE_BEGIN_MASK, &StridedSliceBuilder::SetBeginMask}, + {OH_NN_STRIDED_SLICE_END_MASK, &StridedSliceBuilder::SetEndMask}, + {OH_NN_STRIDED_SLICE_ELLIPSIS_MASK, &StridedSliceBuilder::SetEllipsisMask}, + {OH_NN_STRIDED_SLICE_NEW_AXIS_MASK, &StridedSliceBuilder::SetNewAxisMask}, + {OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK, &StridedSliceBuilder::SetShrinkAxisMask} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/sub_builder.cpp b/frameworks/native/neural_network_runtime/ops/sub_builder.cpp index a298235acaaa512f0f5338524acaa199d2c42e8b..9ea5f2431da87ee85e4517cf95d37f9ee15f9586 100644 --- a/frameworks/native/neural_network_runtime/ops/sub_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/sub_builder.cpp @@ -29,7 +29,7 @@ SubBuilder::SubBuilder() {} SubBuilder::~SubBuilder() {} -OH_NN_ReturnCode SubBuilder::SetActivationType(std::shared_ptr tensor) +OH_NN_ReturnCode SubBuilder::SetActivationType(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT8) { LOGE("[SubBuilder] The 3rd input activation should be type OH_NN_INT8."); @@ -92,13 +92,11 @@ OH_NN_ReturnCode SubBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_SUB_ACTIVATIONTYPE: - returnCode = SetActivationType(tensor); - break; - default: - LOGE("[SubBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[SubBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/sub_builder.h b/frameworks/native/neural_network_runtime/ops/sub_builder.h index 6e638a5cdf5c90155a49b079c885b1c8b7399c1f..f9ae173091154f3a437eb7efbf1ac96016309331 100644 --- a/frameworks/native/neural_network_runtime/ops/sub_builder.h +++ b/frameworks/native/neural_network_runtime/ops/sub_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class SubBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (SubBuilder::*FuncPtr)(const std::shared_ptr&); + SubBuilder(); ~SubBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -36,10 +38,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + OH_NN_ReturnCode SetActivationType(const std::shared_ptr& tensor); private: mindspore::lite::ActivationType m_activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + std::unordered_map m_paramMap = { + {OH_NN_SUB_ACTIVATIONTYPE, &SubBuilder::SetActivationType} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/tile_builder.cpp b/frameworks/native/neural_network_runtime/ops/tile_builder.cpp index 11ae5eac08d4d292a23583c54a5fe5f23776a6bc..bb6ea1d303836b159386d3fc2024cefbd31d40ab 100644 --- a/frameworks/native/neural_network_runtime/ops/tile_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/tile_builder.cpp @@ -29,7 +29,7 @@ TileBuilder::TileBuilder() {} TileBuilder::~TileBuilder() {} -OH_NN_ReturnCode TileBuilder::SetDims(std::shared_ptr tensor) 
+OH_NN_ReturnCode TileBuilder::SetDims(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[TileBuilder] The dims should be type OH_NN_INT64."); @@ -85,14 +85,13 @@ OH_NN_ReturnCode TileBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_TILE_DIMS: - returnCode = SetDims(tensor); - break; - default: - LOGE("[TileBuilder] Build failed, param invalid, type = %d.", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[TileBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } + if (returnCode != OH_NN_SUCCESS) { LOGE("[TileBuilder] Build failed, passed invalid param."); return returnCode; diff --git a/frameworks/native/neural_network_runtime/ops/tile_builder.h b/frameworks/native/neural_network_runtime/ops/tile_builder.h index e504403349c55e5ef761ce351a92b8f903784cfe..e291bbfb158219685213adaa6a46b4637af770dc 100644 --- a/frameworks/native/neural_network_runtime/ops/tile_builder.h +++ b/frameworks/native/neural_network_runtime/ops/tile_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class TileBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (TileBuilder::*FuncPtr)(const std::shared_ptr&); + TileBuilder(); ~TileBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -34,10 +36,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetDims(std::shared_ptr tensor); + OH_NN_ReturnCode SetDims(const std::shared_ptr& tensor); private: std::vector m_dims {0}; + std::unordered_map m_paramMap = { + {OH_NN_TILE_DIMS, &TileBuilder::SetDims} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git 
a/frameworks/native/neural_network_runtime/ops/top_k_builder.cpp b/frameworks/native/neural_network_runtime/ops/top_k_builder.cpp index 03004033818ba41b2dc682f8c6c746a889925c0e..2972987895faf18dd63e5afa6ccbde7225878068 100644 --- a/frameworks/native/neural_network_runtime/ops/top_k_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/top_k_builder.cpp @@ -30,7 +30,7 @@ TopKBuilder::TopKBuilder() {} TopKBuilder::~TopKBuilder() {} -OH_NN_ReturnCode TopKBuilder::SetSorted(std::shared_ptr tensor) +OH_NN_ReturnCode TopKBuilder::SetSorted(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_BOOL) { LOGE("[TopK] The sorted should be type OH_NN_BOOL."); @@ -47,7 +47,7 @@ OH_NN_ReturnCode TopKBuilder::SetSorted(std::shared_ptr tensor) return OH_NN_SUCCESS; } -OH_NN_ReturnCode TopKBuilder::SetAxis(std::shared_ptr tensor) +OH_NN_ReturnCode TopKBuilder::SetAxis(const std::shared_ptr& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[TopK] The axis should be type OH_NN_INT64."); @@ -103,16 +103,11 @@ OH_NN_ReturnCode TopKBuilder::Build(const std::vector& paramsIndex, for (int i : paramsIndex) { std::shared_ptr tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_TOP_K_SORTED: - returnCode = SetSorted(tensor); - break; - case OH_NN_TOP_K_AXIS: - returnCode = SetAxis(tensor); - break; - default: - LOGE("[TopK] Parameter Type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[TopK] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/top_k_builder.h b/frameworks/native/neural_network_runtime/ops/top_k_builder.h index 4c8ccf5aa3d6dd94db491de2c5fc6e6f30796ff5..1eeff5c6a9d77b12d5611cafc90f107e649659b5 100644 --- a/frameworks/native/neural_network_runtime/ops/top_k_builder.h +++ b/frameworks/native/neural_network_runtime/ops/top_k_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class TopKBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (TopKBuilder::*FuncPtr)(const std::shared_ptr&); + TopKBuilder(); ~TopKBuilder() override; OH_NN_ReturnCode Build(const std::vector& paramsIndex, @@ -33,12 +35,16 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetSorted(std::shared_ptr tensor); - OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetSorted(const std::shared_ptr& tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr& tensor); private: int64_t m_axis {0}; bool m_sorted {true}; // true means sorting in the descending order. 
+ std::unordered_map<OH_NN_TensorType, FuncPtr> m_paramMap = { + {OH_NN_TOP_K_SORTED, &TopKBuilder::SetSorted}, + {OH_NN_TOP_K_AXIS, &TopKBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.cpp b/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.cpp index 25d0231fa86bf92188718c6253ff770f2c1aaf7b..a9ddf26ba85d8cdc4f8416ec69291aae7f5b15cf 100644 --- a/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.cpp @@ -29,7 +29,7 @@ UnsqueezeBuilder::UnsqueezeBuilder() {} UnsqueezeBuilder::~UnsqueezeBuilder() {} -OH_NN_ReturnCode UnsqueezeBuilder::SetAxis(std::shared_ptr<NNTensor> tensor) +OH_NN_ReturnCode UnsqueezeBuilder::SetAxis(const std::shared_ptr<NNTensor>& tensor) { // Set Axis if (tensor->GetDataType() != OH_NN_INT64) { @@ -81,13 +81,11 @@ OH_NN_ReturnCode UnsqueezeBuilder::Build(const std::vector<uint32_t>& paramsInde for (int i : paramsIndex) { std::shared_ptr<NNTensor> tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_UNSQUEEZE_AXIS: - returnCode = SetAxis(tensor); - break; - default: - LOGE("[UnsqueezeBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[UnsqueezeBuilder] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.h b/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.h index 08a5ad287ff7a13260404afebc52c86f2669e4ec..6f14c101e44cffb36f2ea0b4480fea5861568453 100644 --- a/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.h +++ b/frameworks/native/neural_network_runtime/ops/unsqueeze_builder.h @@ -24,6 +24,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class UnsqueezeBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (UnsqueezeBuilder::*FuncPtr)(const std::shared_ptr<NNTensor>&); + UnsqueezeBuilder(); ~UnsqueezeBuilder() override; OH_NN_ReturnCode Build(const std::vector<uint32_t>& paramsIndex, @@ -34,10 +36,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr<NNTensor> tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr<NNTensor>& tensor); private: std::vector<int64_t> m_axis; + std::unordered_map<OH_NN_TensorType, FuncPtr> m_paramMap = { + {OH_NN_UNSQUEEZE_AXIS, &UnsqueezeBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp b/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp index 4a0aea9ebd14d31bcb7faf852f0740c48e0d0c7f..f0d162845fc9edc565448a033aa808a51e16ccab 100755 --- a/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp +++ b/frameworks/native/neural_network_runtime/ops/unstack_builder.cpp @@ -28,7 +28,7 @@ UnstackBuilder::UnstackBuilder() {} UnstackBuilder::~UnstackBuilder() {} -OH_NN_ReturnCode UnstackBuilder::SetAxis(std::shared_ptr<NNTensor> tensor) +OH_NN_ReturnCode 
UnstackBuilder::SetAxis(const std::shared_ptr<NNTensor>& tensor) { if (tensor->GetDataType() != OH_NN_INT64) { LOGE("[Unstack] The axis should be type OH_NN_INT64."); @@ -97,13 +97,11 @@ OH_NN_ReturnCode UnstackBuilder::Build(const std::vector<uint32_t>& paramsIndex, for (int i : paramsIndex) { std::shared_ptr<NNTensor> tensor = allTensors[i]; tensor->IdentifyOpParameter(); - switch (tensor->GetType()) { - case OH_NN_UNSTACK_AXIS: - returnCode = SetAxis(tensor); - break; - default: - LOGE("[Unstack] Build failed, param invalid, type=%d", tensor->GetType()); - return OH_NN_INVALID_PARAMETER; + if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) { + returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor); + } else { + LOGE("[Unstack] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; } if (returnCode != OH_NN_SUCCESS) { diff --git a/frameworks/native/neural_network_runtime/ops/unstack_builder.h b/frameworks/native/neural_network_runtime/ops/unstack_builder.h index 40a23f5448682603feb0ad2b8bdb33946c5720d0..6619733f9263dc8f3e0cc28a7a9b73b1bfe532c1 100755 --- a/frameworks/native/neural_network_runtime/ops/unstack_builder.h +++ b/frameworks/native/neural_network_runtime/ops/unstack_builder.h @@ -26,6 +26,8 @@ namespace NeuralNetworkRuntime { namespace Ops { class UnstackBuilder : public OpsBuilder { public: + typedef OH_NN_ReturnCode (UnstackBuilder::*FuncPtr)(const std::shared_ptr<NNTensor>&); + UnstackBuilder(); ~UnstackBuilder() override; OH_NN_ReturnCode Build(const std::vector<uint32_t>& paramsIndex, @@ -36,10 +38,13 @@ public: LiteGraphPrimitvePtr GetPrimitive() override; private: - OH_NN_ReturnCode SetAxis(std::shared_ptr<NNTensor> tensor); + OH_NN_ReturnCode SetAxis(const std::shared_ptr<NNTensor>& tensor); private: int64_t m_axis {0}; + std::unordered_map<OH_NN_TensorType, FuncPtr> m_paramMap = { + {OH_NN_UNSTACK_AXIS, &UnstackBuilder::SetAxis} + }; }; } // namespace Ops } // namespace NeuralNetworkRuntime