From 16dbd74b1362db08da307c17df5cfd7be3e9fccc Mon Sep 17 00:00:00 2001 From: yangyongjie Date: Mon, 24 Oct 2022 18:50:53 +0800 Subject: [PATCH] add neural network runtime Signed-off-by: yangyongjie --- .gitignore | 20 + BUILD.gn | 28 + LICENSE | 28 +- OAT.xml | 34 + README.en.md | 36 - README.md | 39 - README_zh.md | 26 + bundle.json | 50 + common/log.h | 41 + common/scoped_trace.h | 50 + common/utils.h | 40 + .../deep_learning_framework/CMakeLists.txt | 29 + example/deep_learning_framework/Flowchart.png | Bin 0 -> 126482 bytes example/deep_learning_framework/Principle.png | Bin 0 -> 25277 bytes example/deep_learning_framework/README_zh.md | 218 ++ .../cmake_build/build_ohos_tflite.sh | 51 + .../tflite/CMakeLists.txt | 25 + .../delegates/nnrt_delegate/CMakeLists.txt | 34 + .../delegates/nnrt_delegate/nnrt_delegate.cpp | 316 +++ .../delegates/nnrt_delegate/nnrt_delegate.h | 167 ++ .../nnrt_delegate/nnrt_delegate_kernel.cpp | 420 ++++ .../nnrt_delegate/nnrt_delegate_kernel.h | 104 + .../nnrt_delegate/nnrt_delegate_provider.cpp | 215 ++ .../nnrt_delegate/nnrt_op_builder.cpp | 503 ++++ .../delegates/nnrt_delegate/nnrt_op_builder.h | 325 +++ .../delegates/nnrt_delegate/nnrt_utils.cpp | 347 +++ .../delegates/nnrt_delegate/nnrt_utils.h | 153 ++ .../delegates/nnrt_delegate/tensor_mapping.h | 177 ++ .../tflite/label_classify/CMakeLists.txt | 39 + .../tflite/label_classify/label_classify.cpp | 341 +++ .../tflite/label_classify/label_classify.h | 47 + .../tflite/nnrt/CMakeLists.txt | 26 + .../tflite/nnrt/nnrt_implementation.cpp | 113 + .../tflite/nnrt/nnrt_implementation.h | 88 + .../tflite/tools/bitmap_helpers.cpp | 116 + .../tflite/tools/bitmap_helpers.h | 141 ++ .../tflite/tools/get_topn.h | 87 + .../tflite/tools/log.h | 46 + .../tflite/tools/utils.cpp | 267 ++ .../tflite/tools/utils.h | 44 + example/drivers/README_zh.md | 329 +++ example/drivers/arch_diagram.png | Bin 0 -> 8746 bytes example/drivers/dev_flow.png | Bin 0 -> 6014 bytes example/drivers/nnrt/BUILD.gn | 24 + example/drivers/nnrt/hdi_cpu_service/BUILD.gn | 90 + .../include/nnrt_device_service.h | 83 + .../hdi_cpu_service/include/node_functions.h | 71 + .../hdi_cpu_service/include/node_registry.h | 57 + .../include/prepared_model_service.h | 77 + .../include/shared_buffer_parser.h | 49 + .../nnrt/hdi_cpu_service/include/validation.h | 33 + .../src/nnrt_device_driver.cpp | 115 + .../src/nnrt_device_service.cpp | 436 ++++ .../hdi_cpu_service/src/node_functions.cpp | 373 +++ .../hdi_cpu_service/src/node_registry.cpp | 60 + .../src/prepared_model_service.cpp | 412 +++ .../src/shared_buffer_parser.cpp | 104 + .../nnrt/hdi_cpu_service/src/validation.cpp | 72 + frameworks/BUILD.gn | 133 + frameworks/native/compilation.cpp | 714 ++++++ frameworks/native/compilation.h | 86 + frameworks/native/device_manager.cpp | 171 ++ frameworks/native/device_manager.h | 68 + frameworks/native/device_registrar.cpp | 32 + frameworks/native/execution_plan.cpp | 91 + frameworks/native/execution_plan.h | 45 + frameworks/native/executor.cpp | 555 ++++ frameworks/native/executor.h | 72 + frameworks/native/hdi_device.cpp | 331 +++ frameworks/native/hdi_device.h | 63 + frameworks/native/hdi_interfaces.h | 29 + frameworks/native/hdi_prepared_model.cpp | 91 + frameworks/native/hdi_prepared_model.h | 47 + frameworks/native/inner_model.cpp | 546 ++++ frameworks/native/inner_model.h | 67 + frameworks/native/memory_manager.cpp | 102 + frameworks/native/memory_manager.h | 60 + frameworks/native/neural_network_runtime.cpp | 682 +++++ frameworks/native/nn_tensor.cpp | 409 +++ 
frameworks/native/nn_tensor.h | 97 + frameworks/native/ops/add_builder.cpp | 115 + frameworks/native/ops/add_builder.h | 48 + frameworks/native/ops/argmax_builder.cpp | 131 + frameworks/native/ops/argmax_builder.h | 51 + frameworks/native/ops/avgpool_builder.cpp | 60 + frameworks/native/ops/avgpool_builder.h | 40 + .../native/ops/batch_to_space_nd_builder.cpp | 142 ++ .../native/ops/batch_to_space_nd_builder.h | 53 + frameworks/native/ops/batchnorm_builder.cpp | 116 + frameworks/native/ops/batchnorm_builder.h | 44 + frameworks/native/ops/bias_add_builder.cpp | 75 + frameworks/native/ops/bias_add_builder.h | 42 + frameworks/native/ops/cast_builder.cpp | 89 + frameworks/native/ops/cast_builder.h | 42 + frameworks/native/ops/concat_builder.cpp | 146 ++ frameworks/native/ops/concat_builder.h | 50 + frameworks/native/ops/conv2d_builder.cpp | 298 +++ frameworks/native/ops/conv2d_builder.h | 65 + .../native/ops/conv2d_transpose_builder.cpp | 313 +++ .../native/ops/conv2d_transpose_builder.h | 66 + .../ops/depthwise_conv2d_native_builder.cpp | 275 ++ .../ops/depthwise_conv2d_native_builder.h | 62 + frameworks/native/ops/div_builder.cpp | 119 + frameworks/native/ops/div_builder.h | 48 + frameworks/native/ops/eltwise_builder.cpp | 119 + frameworks/native/ops/eltwise_builder.h | 48 + frameworks/native/ops/expandims_builder.cpp | 73 + frameworks/native/ops/expandims_builder.h | 42 + frameworks/native/ops/fill_builder.cpp | 80 + frameworks/native/ops/fill_builder.h | 38 + .../native/ops/fullconnection_builder.cpp | 183 ++ .../native/ops/fullconnection_builder.h | 55 + frameworks/native/ops/gather_builder.cpp | 80 + frameworks/native/ops/gather_builder.h | 38 + frameworks/native/ops/gelu_builder.cpp | 83 + frameworks/native/ops/gelu_builder.h | 38 + frameworks/native/ops/hswish_builder.cpp | 87 + frameworks/native/ops/hswish_builder.h | 38 + frameworks/native/ops/layernorm_builder.cpp | 206 ++ frameworks/native/ops/layernorm_builder.h | 51 + frameworks/native/ops/lessequal_builder.cpp | 80 + frameworks/native/ops/lessequal_builder.h | 38 + frameworks/native/ops/matmul_builder.cpp | 175 ++ frameworks/native/ops/matmul_builder.h | 50 + frameworks/native/ops/maximum_builder.cpp | 77 + frameworks/native/ops/maximum_builder.h | 38 + frameworks/native/ops/maxpool_builder.cpp | 58 + frameworks/native/ops/maxpool_builder.h | 40 + frameworks/native/ops/mul_builder.cpp | 122 + frameworks/native/ops/mul_builder.h | 46 + frameworks/native/ops/onehot_builder.cpp | 107 + frameworks/native/ops/onehot_builder.h | 44 + frameworks/native/ops/pad_builder.cpp | 116 + frameworks/native/ops/pad_builder.h | 45 + frameworks/native/ops/pooling_builder.cpp | 220 ++ frameworks/native/ops/pooling_builder.h | 59 + frameworks/native/ops/pow_builder.cpp | 82 + frameworks/native/ops/pow_builder.h | 39 + frameworks/native/ops/prelu_builder.cpp | 78 + frameworks/native/ops/prelu_builder.h | 39 + .../native/ops/quant_dtype_cast_builder.cpp | 128 + .../native/ops/quant_dtype_cast_builder.h | 47 + frameworks/native/ops/reduceall_builder.cpp | 114 + frameworks/native/ops/reduceall_builder.h | 48 + frameworks/native/ops/reducemean_builder.cpp | 116 + frameworks/native/ops/reducemean_builder.h | 48 + frameworks/native/ops/reduceprod_builder.cpp | 116 + frameworks/native/ops/reduceprod_builder.h | 48 + frameworks/native/ops/relu6_builder.cpp | 87 + frameworks/native/ops/relu6_builder.h | 39 + frameworks/native/ops/relu_builder.cpp | 87 + frameworks/native/ops/relu_builder.h | 39 + frameworks/native/ops/reshape_builder.cpp | 79 + 
frameworks/native/ops/reshape_builder.h | 39 + .../native/ops/resize_bilinear_builder.cpp | 227 ++ .../native/ops/resize_bilinear_builder.h | 57 + frameworks/native/ops/rsqrt_builder.cpp | 79 + frameworks/native/ops/rsqrt_builder.h | 39 + frameworks/native/ops/scale_builder.cpp | 149 ++ frameworks/native/ops/scale_builder.h | 49 + frameworks/native/ops/shape_builder.cpp | 77 + frameworks/native/ops/shape_builder.h | 39 + frameworks/native/ops/sigmoid_builder.cpp | 88 + frameworks/native/ops/sigmoid_builder.h | 39 + frameworks/native/ops/slice_builder.cpp | 89 + frameworks/native/ops/slice_builder.h | 43 + frameworks/native/ops/softmax_builder.cpp | 121 + frameworks/native/ops/softmax_builder.h | 46 + .../native/ops/space_to_batch_nd_builder.cpp | 192 ++ .../native/ops/space_to_batch_nd_builder.h | 49 + frameworks/native/ops/split_builder.cpp | 186 ++ frameworks/native/ops/split_builder.h | 55 + frameworks/native/ops/sqrt_builder.cpp | 88 + frameworks/native/ops/sqrt_builder.h | 40 + .../native/ops/squared_difference_builder.cpp | 86 + .../native/ops/squared_difference_builder.h | 40 + frameworks/native/ops/squeeze_builder.cpp | 125 + frameworks/native/ops/squeeze_builder.h | 46 + frameworks/native/ops/stack_builder.cpp | 125 + frameworks/native/ops/stack_builder.h | 46 + .../native/ops/strided_slice_builder.cpp | 212 ++ frameworks/native/ops/strided_slice_builder.h | 57 + frameworks/native/ops/sub_builder.cpp | 131 + frameworks/native/ops/sub_builder.h | 48 + frameworks/native/ops/tanh_builder.cpp | 92 + frameworks/native/ops/tanh_builder.h | 45 + frameworks/native/ops/tile_builder.cpp | 86 + frameworks/native/ops/tile_builder.h | 43 + frameworks/native/ops/top_k_builder.cpp | 117 + frameworks/native/ops/top_k_builder.h | 45 + frameworks/native/ops/transpose_builder.cpp | 86 + frameworks/native/ops/transpose_builder.h | 39 + frameworks/native/ops/unsqueeze_builder.cpp | 119 + frameworks/native/ops/unsqueeze_builder.h | 46 + frameworks/native/ops_builder.cpp | 99 + frameworks/native/ops_builder.h | 81 + frameworks/native/ops_registry.cpp | 46 + frameworks/native/ops_registry.h | 54 + frameworks/native/transform.cpp | 299 +++ frameworks/native/transform.h | 68 + frameworks/native/validation.cpp | 72 + frameworks/native/validation.h | 45 + .../c/neural_network_runtime_inner.h | 54 + interfaces/kits/c/neural_network_runtime.h | 686 +++++ .../kits/c/neural_network_runtime_type.h | 1632 ++++++++++++ interfaces/oem/cpp_api/cpp_type.h | 64 + interfaces/oem/cpp_api/device.h | 60 + interfaces/oem/cpp_api/device_registrar.h | 41 + interfaces/oem/cpp_api/prepared_model.h | 40 + neural-network-runtime-guidelines.md | 460 ++++ neural_network_runtime_add_op_model.png | Bin 0 -> 27897 bytes neural_network_runtime_intro.png | Bin 0 -> 26387 bytes test/system_test/BUILD.gn | 107 + test/system_test/common/nnrt_test.cpp | 234 ++ test/system_test/common/nnrt_test.h | 89 + test/system_test/device_test.cpp | 190 ++ test/system_test/end_to_end_test.cpp | 617 +++++ test/system_test/end_to_end_test.h | 41 + test/system_test/stress_test.cpp | 185 ++ test/unittest/BUILD.gn | 23 + test/unittest/common/base_test.cpp | 43 + test/unittest/common/base_test.h | 37 + .../common/compilation_mock_idevice.cpp | 286 +++ test/unittest/common/executor_mock_device.cpp | 104 + test/unittest/common/file_utils.cpp | 51 + test/unittest/common/file_utils.h | 34 + .../common/inner_model_mock_device.cpp | 72 + test/unittest/common/mock_idevice.cpp | 54 + test/unittest/common/mock_idevice.h | 62 + test/unittest/components/BUILD.gn | 336 
+++ .../compilation/compilation_test.cpp | 1143 +++++++++ .../components/compilation/compilation_test.h | 38 + .../device_manager/device_manager_test.cpp | 242 ++ .../device_registrar_test.cpp | 264 ++ .../components/executor/executor_test.cpp | 1206 +++++++++ .../components/executor/executor_test.h | 48 + .../components/hdi_device/hdi_device_test.cpp | 875 +++++++ .../hdi_prepared_model_test.cpp | 344 +++ .../inner_model/inner_model_test.cpp | 825 ++++++ .../components/inner_model/nn_tensor_test.cpp | 525 ++++ .../inner_model/nn_validation_test.cpp | 175 ++ .../inner_model/ops_regitstry_test.cpp | 64 + .../memory_manager/memory_manager_test.cpp | 231 ++ .../neural_network_runtime_test.cpp | 2221 +++++++++++++++++ .../neural_network_runtime_test.h | 52 + .../components/transform/transform_test.cpp | 912 +++++++ test/unittest/inner_kits/BUILD.gn | 64 + .../neural_network_runtime_inner_test.cpp | 130 + .../neural_network_runtime_inner_test.h | 36 + test/unittest/ops/BUILD.gn | 119 + test/unittest/ops/add_test.cpp | 277 ++ test/unittest/ops/argmax_test.cpp | 330 +++ test/unittest/ops/avgpool_pad_test.cpp | 431 ++++ test/unittest/ops/avgpool_padmod_test.cpp | 420 ++++ test/unittest/ops/batch_to_space_nd_test.cpp | 338 +++ test/unittest/ops/batchnorm_builder_test.cpp | 261 ++ test/unittest/ops/biasadd_test.cpp | 212 ++ test/unittest/ops/cast_test.cpp | 260 ++ .../unittest/ops/concat_three_inputs_test.cpp | 244 ++ test/unittest/ops/concat_two_inputs_test.cpp | 265 ++ test/unittest/ops/conv2d_pad_test.cpp | 561 +++++ test/unittest/ops/conv2d_padmode_test.cpp | 591 +++++ .../ops/conv2d_tranpose_padmode_test.cpp | 790 ++++++ .../ops/conv2d_transpose_pad_test.cpp | 488 ++++ .../ops/depthwise_conv2d_native_pad_test.cpp | 629 +++++ .../depthwise_conv2d_native_padmode_test.cpp | 409 +++ test/unittest/ops/div_test.cpp | 288 +++ test/unittest/ops/eltwise_test.cpp | 297 +++ test/unittest/ops/expandims_test.cpp | 186 ++ test/unittest/ops/fill_builder_test.cpp | 180 ++ test/unittest/ops/fullconnection_test.cpp | 289 +++ .../ops/fullconnection_with_axis_test.cpp | 341 +++ test/unittest/ops/gather_builder_test.cpp | 181 ++ test/unittest/ops/gelu_builder_test.cpp | 185 ++ test/unittest/ops/hswish_builder_test.cpp | 185 ++ test/unittest/ops/layernorm_builder_test.cpp | 465 ++++ test/unittest/ops/lessequal_builder_test.cpp | 181 ++ test/unittest/ops/matmul_builder_test.cpp | 483 ++++ test/unittest/ops/maximum_builder_test.cpp | 182 ++ test/unittest/ops/maxpool_pad_test.cpp | 432 ++++ test/unittest/ops/maxpool_padmode_test.cpp | 341 +++ test/unittest/ops/mul_builder_test.cpp | 288 +++ test/unittest/ops/onehot_builder_test.cpp | 239 ++ test/unittest/ops/ops_test.cpp | 100 + test/unittest/ops/ops_test.h | 58 + test/unittest/ops/pad_builder_test.cpp | 261 ++ test/unittest/ops/pow_builder_test.cpp | 177 ++ test/unittest/ops/prelu_builder_test.cpp | 177 ++ .../ops/quant_dtype_cast_builder_test.cpp | 307 +++ test/unittest/ops/reduce_all_builder_test.cpp | 263 ++ .../unittest/ops/reduce_mean_builder_test.cpp | 263 ++ .../unittest/ops/reduce_prod_builder_test.cpp | 263 ++ test/unittest/ops/relu6_builder_test.cpp | 180 ++ test/unittest/ops/relu_builder_test.cpp | 180 ++ test/unittest/ops/reshape_builder_test.cpp | 177 ++ .../ops/resize_bilinear_builder_test.cpp | 664 +++++ test/unittest/ops/rsqrt_builder_test.cpp | 177 ++ test/unittest/ops/scale_builder_test.cpp | 377 +++ test/unittest/ops/shape_builder_test.cpp | 178 ++ test/unittest/ops/sigmoid_builder_test.cpp | 180 ++ test/unittest/ops/slice_builder_test.cpp | 214 ++ 
test/unittest/ops/softmax_builder_test.cpp | 303 +++ .../ops/spacetobatchnd_builder_test.cpp | 487 ++++ test/unittest/ops/split_builder_test.cpp | 435 ++++ test/unittest/ops/sqrt_builder_test.cpp | 205 ++ .../ops/squared_difference_builder_test.cpp | 204 ++ test/unittest/ops/squeeze_builder_test.cpp | 266 ++ test/unittest/ops/stack_builder_test.cpp | 299 +++ .../ops/strided_slice_builder_test.cpp | 557 +++++ test/unittest/ops/sub_builder_test.cpp | 324 +++ test/unittest/ops/tanh_builder_test.cpp | 194 ++ test/unittest/ops/tile_builder_test.cpp | 200 ++ test/unittest/ops/topk_builder_test.cpp | 261 ++ test/unittest/ops/transpose_builder_test.cpp | 193 ++ test/unittest/ops/unsqueeze_builder_test.cpp | 301 +++ 315 files changed, 56883 insertions(+), 101 deletions(-) create mode 100644 .gitignore create mode 100644 BUILD.gn create mode 100644 OAT.xml delete mode 100644 README.en.md delete mode 100644 README.md create mode 100644 README_zh.md create mode 100644 bundle.json create mode 100644 common/log.h create mode 100644 common/scoped_trace.h create mode 100644 common/utils.h create mode 100644 example/deep_learning_framework/CMakeLists.txt create mode 100644 example/deep_learning_framework/Flowchart.png create mode 100644 example/deep_learning_framework/Principle.png create mode 100644 example/deep_learning_framework/README_zh.md create mode 100644 example/deep_learning_framework/cmake_build/build_ohos_tflite.sh create mode 100644 example/deep_learning_framework/tflite/CMakeLists.txt create mode 100644 example/deep_learning_framework/tflite/delegates/nnrt_delegate/CMakeLists.txt create mode 100644 example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate.cpp create mode 100644 example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate.h create mode 100644 example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_kernel.cpp create mode 100644 example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_kernel.h create mode 100644 example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_provider.cpp create mode 100644 example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_op_builder.cpp create mode 100644 example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_op_builder.h create mode 100644 example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_utils.cpp create mode 100644 example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_utils.h create mode 100644 example/deep_learning_framework/tflite/delegates/nnrt_delegate/tensor_mapping.h create mode 100644 example/deep_learning_framework/tflite/label_classify/CMakeLists.txt create mode 100644 example/deep_learning_framework/tflite/label_classify/label_classify.cpp create mode 100644 example/deep_learning_framework/tflite/label_classify/label_classify.h create mode 100644 example/deep_learning_framework/tflite/nnrt/CMakeLists.txt create mode 100644 example/deep_learning_framework/tflite/nnrt/nnrt_implementation.cpp create mode 100644 example/deep_learning_framework/tflite/nnrt/nnrt_implementation.h create mode 100644 example/deep_learning_framework/tflite/tools/bitmap_helpers.cpp create mode 100644 example/deep_learning_framework/tflite/tools/bitmap_helpers.h create mode 100644 example/deep_learning_framework/tflite/tools/get_topn.h create mode 100644 example/deep_learning_framework/tflite/tools/log.h create mode 100644 example/deep_learning_framework/tflite/tools/utils.cpp create mode 100644 
example/deep_learning_framework/tflite/tools/utils.h create mode 100644 example/drivers/README_zh.md create mode 100644 example/drivers/arch_diagram.png create mode 100644 example/drivers/dev_flow.png create mode 100644 example/drivers/nnrt/BUILD.gn create mode 100644 example/drivers/nnrt/hdi_cpu_service/BUILD.gn create mode 100644 example/drivers/nnrt/hdi_cpu_service/include/nnrt_device_service.h create mode 100644 example/drivers/nnrt/hdi_cpu_service/include/node_functions.h create mode 100644 example/drivers/nnrt/hdi_cpu_service/include/node_registry.h create mode 100644 example/drivers/nnrt/hdi_cpu_service/include/prepared_model_service.h create mode 100644 example/drivers/nnrt/hdi_cpu_service/include/shared_buffer_parser.h create mode 100644 example/drivers/nnrt/hdi_cpu_service/include/validation.h create mode 100644 example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_driver.cpp create mode 100644 example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_service.cpp create mode 100644 example/drivers/nnrt/hdi_cpu_service/src/node_functions.cpp create mode 100644 example/drivers/nnrt/hdi_cpu_service/src/node_registry.cpp create mode 100644 example/drivers/nnrt/hdi_cpu_service/src/prepared_model_service.cpp create mode 100644 example/drivers/nnrt/hdi_cpu_service/src/shared_buffer_parser.cpp create mode 100644 example/drivers/nnrt/hdi_cpu_service/src/validation.cpp create mode 100644 frameworks/BUILD.gn create mode 100644 frameworks/native/compilation.cpp create mode 100644 frameworks/native/compilation.h create mode 100644 frameworks/native/device_manager.cpp create mode 100644 frameworks/native/device_manager.h create mode 100644 frameworks/native/device_registrar.cpp create mode 100644 frameworks/native/execution_plan.cpp create mode 100644 frameworks/native/execution_plan.h create mode 100644 frameworks/native/executor.cpp create mode 100644 frameworks/native/executor.h create mode 100644 frameworks/native/hdi_device.cpp create mode 100644 frameworks/native/hdi_device.h create mode 100644 frameworks/native/hdi_interfaces.h create mode 100644 frameworks/native/hdi_prepared_model.cpp create mode 100644 frameworks/native/hdi_prepared_model.h create mode 100644 frameworks/native/inner_model.cpp create mode 100644 frameworks/native/inner_model.h create mode 100644 frameworks/native/memory_manager.cpp create mode 100644 frameworks/native/memory_manager.h create mode 100644 frameworks/native/neural_network_runtime.cpp create mode 100644 frameworks/native/nn_tensor.cpp create mode 100644 frameworks/native/nn_tensor.h create mode 100644 frameworks/native/ops/add_builder.cpp create mode 100644 frameworks/native/ops/add_builder.h create mode 100644 frameworks/native/ops/argmax_builder.cpp create mode 100644 frameworks/native/ops/argmax_builder.h create mode 100644 frameworks/native/ops/avgpool_builder.cpp create mode 100644 frameworks/native/ops/avgpool_builder.h create mode 100644 frameworks/native/ops/batch_to_space_nd_builder.cpp create mode 100644 frameworks/native/ops/batch_to_space_nd_builder.h create mode 100644 frameworks/native/ops/batchnorm_builder.cpp create mode 100644 frameworks/native/ops/batchnorm_builder.h create mode 100644 frameworks/native/ops/bias_add_builder.cpp create mode 100644 frameworks/native/ops/bias_add_builder.h create mode 100644 frameworks/native/ops/cast_builder.cpp create mode 100644 frameworks/native/ops/cast_builder.h create mode 100644 frameworks/native/ops/concat_builder.cpp create mode 100644 frameworks/native/ops/concat_builder.h create mode 100644 
frameworks/native/ops/conv2d_builder.cpp create mode 100644 frameworks/native/ops/conv2d_builder.h create mode 100644 frameworks/native/ops/conv2d_transpose_builder.cpp create mode 100644 frameworks/native/ops/conv2d_transpose_builder.h create mode 100644 frameworks/native/ops/depthwise_conv2d_native_builder.cpp create mode 100644 frameworks/native/ops/depthwise_conv2d_native_builder.h create mode 100644 frameworks/native/ops/div_builder.cpp create mode 100644 frameworks/native/ops/div_builder.h create mode 100644 frameworks/native/ops/eltwise_builder.cpp create mode 100644 frameworks/native/ops/eltwise_builder.h create mode 100644 frameworks/native/ops/expandims_builder.cpp create mode 100644 frameworks/native/ops/expandims_builder.h create mode 100644 frameworks/native/ops/fill_builder.cpp create mode 100644 frameworks/native/ops/fill_builder.h create mode 100644 frameworks/native/ops/fullconnection_builder.cpp create mode 100644 frameworks/native/ops/fullconnection_builder.h create mode 100644 frameworks/native/ops/gather_builder.cpp create mode 100644 frameworks/native/ops/gather_builder.h create mode 100644 frameworks/native/ops/gelu_builder.cpp create mode 100644 frameworks/native/ops/gelu_builder.h create mode 100644 frameworks/native/ops/hswish_builder.cpp create mode 100644 frameworks/native/ops/hswish_builder.h create mode 100644 frameworks/native/ops/layernorm_builder.cpp create mode 100644 frameworks/native/ops/layernorm_builder.h create mode 100644 frameworks/native/ops/lessequal_builder.cpp create mode 100644 frameworks/native/ops/lessequal_builder.h create mode 100644 frameworks/native/ops/matmul_builder.cpp create mode 100644 frameworks/native/ops/matmul_builder.h create mode 100644 frameworks/native/ops/maximum_builder.cpp create mode 100644 frameworks/native/ops/maximum_builder.h create mode 100644 frameworks/native/ops/maxpool_builder.cpp create mode 100644 frameworks/native/ops/maxpool_builder.h create mode 100644 frameworks/native/ops/mul_builder.cpp create mode 100644 frameworks/native/ops/mul_builder.h create mode 100644 frameworks/native/ops/onehot_builder.cpp create mode 100644 frameworks/native/ops/onehot_builder.h create mode 100644 frameworks/native/ops/pad_builder.cpp create mode 100644 frameworks/native/ops/pad_builder.h create mode 100644 frameworks/native/ops/pooling_builder.cpp create mode 100644 frameworks/native/ops/pooling_builder.h create mode 100644 frameworks/native/ops/pow_builder.cpp create mode 100644 frameworks/native/ops/pow_builder.h create mode 100644 frameworks/native/ops/prelu_builder.cpp create mode 100644 frameworks/native/ops/prelu_builder.h create mode 100644 frameworks/native/ops/quant_dtype_cast_builder.cpp create mode 100644 frameworks/native/ops/quant_dtype_cast_builder.h create mode 100644 frameworks/native/ops/reduceall_builder.cpp create mode 100644 frameworks/native/ops/reduceall_builder.h create mode 100644 frameworks/native/ops/reducemean_builder.cpp create mode 100644 frameworks/native/ops/reducemean_builder.h create mode 100644 frameworks/native/ops/reduceprod_builder.cpp create mode 100644 frameworks/native/ops/reduceprod_builder.h create mode 100644 frameworks/native/ops/relu6_builder.cpp create mode 100644 frameworks/native/ops/relu6_builder.h create mode 100644 frameworks/native/ops/relu_builder.cpp create mode 100644 frameworks/native/ops/relu_builder.h create mode 100644 frameworks/native/ops/reshape_builder.cpp create mode 100644 frameworks/native/ops/reshape_builder.h create mode 100644 
frameworks/native/ops/resize_bilinear_builder.cpp create mode 100644 frameworks/native/ops/resize_bilinear_builder.h create mode 100644 frameworks/native/ops/rsqrt_builder.cpp create mode 100644 frameworks/native/ops/rsqrt_builder.h create mode 100644 frameworks/native/ops/scale_builder.cpp create mode 100644 frameworks/native/ops/scale_builder.h create mode 100644 frameworks/native/ops/shape_builder.cpp create mode 100644 frameworks/native/ops/shape_builder.h create mode 100644 frameworks/native/ops/sigmoid_builder.cpp create mode 100644 frameworks/native/ops/sigmoid_builder.h create mode 100644 frameworks/native/ops/slice_builder.cpp create mode 100644 frameworks/native/ops/slice_builder.h create mode 100644 frameworks/native/ops/softmax_builder.cpp create mode 100644 frameworks/native/ops/softmax_builder.h create mode 100644 frameworks/native/ops/space_to_batch_nd_builder.cpp create mode 100644 frameworks/native/ops/space_to_batch_nd_builder.h create mode 100644 frameworks/native/ops/split_builder.cpp create mode 100644 frameworks/native/ops/split_builder.h create mode 100644 frameworks/native/ops/sqrt_builder.cpp create mode 100644 frameworks/native/ops/sqrt_builder.h create mode 100644 frameworks/native/ops/squared_difference_builder.cpp create mode 100644 frameworks/native/ops/squared_difference_builder.h create mode 100644 frameworks/native/ops/squeeze_builder.cpp create mode 100644 frameworks/native/ops/squeeze_builder.h create mode 100644 frameworks/native/ops/stack_builder.cpp create mode 100644 frameworks/native/ops/stack_builder.h create mode 100644 frameworks/native/ops/strided_slice_builder.cpp create mode 100644 frameworks/native/ops/strided_slice_builder.h create mode 100644 frameworks/native/ops/sub_builder.cpp create mode 100644 frameworks/native/ops/sub_builder.h create mode 100644 frameworks/native/ops/tanh_builder.cpp create mode 100644 frameworks/native/ops/tanh_builder.h create mode 100644 frameworks/native/ops/tile_builder.cpp create mode 100644 frameworks/native/ops/tile_builder.h create mode 100644 frameworks/native/ops/top_k_builder.cpp create mode 100644 frameworks/native/ops/top_k_builder.h create mode 100644 frameworks/native/ops/transpose_builder.cpp create mode 100644 frameworks/native/ops/transpose_builder.h create mode 100644 frameworks/native/ops/unsqueeze_builder.cpp create mode 100644 frameworks/native/ops/unsqueeze_builder.h create mode 100644 frameworks/native/ops_builder.cpp create mode 100644 frameworks/native/ops_builder.h create mode 100644 frameworks/native/ops_registry.cpp create mode 100644 frameworks/native/ops_registry.h create mode 100644 frameworks/native/transform.cpp create mode 100644 frameworks/native/transform.h create mode 100644 frameworks/native/validation.cpp create mode 100644 frameworks/native/validation.h create mode 100644 interfaces/innerkits/c/neural_network_runtime_inner.h create mode 100644 interfaces/kits/c/neural_network_runtime.h create mode 100644 interfaces/kits/c/neural_network_runtime_type.h create mode 100644 interfaces/oem/cpp_api/cpp_type.h create mode 100644 interfaces/oem/cpp_api/device.h create mode 100644 interfaces/oem/cpp_api/device_registrar.h create mode 100644 interfaces/oem/cpp_api/prepared_model.h create mode 100644 neural-network-runtime-guidelines.md create mode 100644 neural_network_runtime_add_op_model.png create mode 100644 neural_network_runtime_intro.png create mode 100644 test/system_test/BUILD.gn create mode 100644 test/system_test/common/nnrt_test.cpp create mode 100644 
test/system_test/common/nnrt_test.h create mode 100644 test/system_test/device_test.cpp create mode 100644 test/system_test/end_to_end_test.cpp create mode 100644 test/system_test/end_to_end_test.h create mode 100644 test/system_test/stress_test.cpp create mode 100644 test/unittest/BUILD.gn create mode 100644 test/unittest/common/base_test.cpp create mode 100644 test/unittest/common/base_test.h create mode 100644 test/unittest/common/compilation_mock_idevice.cpp create mode 100644 test/unittest/common/executor_mock_device.cpp create mode 100644 test/unittest/common/file_utils.cpp create mode 100644 test/unittest/common/file_utils.h create mode 100644 test/unittest/common/inner_model_mock_device.cpp create mode 100644 test/unittest/common/mock_idevice.cpp create mode 100644 test/unittest/common/mock_idevice.h create mode 100644 test/unittest/components/BUILD.gn create mode 100644 test/unittest/components/compilation/compilation_test.cpp create mode 100644 test/unittest/components/compilation/compilation_test.h create mode 100644 test/unittest/components/device_manager/device_manager_test.cpp create mode 100644 test/unittest/components/device_registrar/device_registrar_test.cpp create mode 100644 test/unittest/components/executor/executor_test.cpp create mode 100644 test/unittest/components/executor/executor_test.h create mode 100644 test/unittest/components/hdi_device/hdi_device_test.cpp create mode 100644 test/unittest/components/hdi_prepared_model/hdi_prepared_model_test.cpp create mode 100644 test/unittest/components/inner_model/inner_model_test.cpp create mode 100644 test/unittest/components/inner_model/nn_tensor_test.cpp create mode 100644 test/unittest/components/inner_model/nn_validation_test.cpp create mode 100644 test/unittest/components/inner_model/ops_regitstry_test.cpp create mode 100644 test/unittest/components/memory_manager/memory_manager_test.cpp create mode 100644 test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.cpp create mode 100644 test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.h create mode 100644 test/unittest/components/transform/transform_test.cpp create mode 100644 test/unittest/inner_kits/BUILD.gn create mode 100644 test/unittest/inner_kits/neural_network_runtime_inner_test.cpp create mode 100644 test/unittest/inner_kits/neural_network_runtime_inner_test.h create mode 100644 test/unittest/ops/BUILD.gn create mode 100644 test/unittest/ops/add_test.cpp create mode 100644 test/unittest/ops/argmax_test.cpp create mode 100644 test/unittest/ops/avgpool_pad_test.cpp create mode 100644 test/unittest/ops/avgpool_padmod_test.cpp create mode 100644 test/unittest/ops/batch_to_space_nd_test.cpp create mode 100644 test/unittest/ops/batchnorm_builder_test.cpp create mode 100644 test/unittest/ops/biasadd_test.cpp create mode 100644 test/unittest/ops/cast_test.cpp create mode 100644 test/unittest/ops/concat_three_inputs_test.cpp create mode 100644 test/unittest/ops/concat_two_inputs_test.cpp create mode 100644 test/unittest/ops/conv2d_pad_test.cpp create mode 100644 test/unittest/ops/conv2d_padmode_test.cpp create mode 100644 test/unittest/ops/conv2d_tranpose_padmode_test.cpp create mode 100644 test/unittest/ops/conv2d_transpose_pad_test.cpp create mode 100644 test/unittest/ops/depthwise_conv2d_native_pad_test.cpp create mode 100644 test/unittest/ops/depthwise_conv2d_native_padmode_test.cpp create mode 100644 test/unittest/ops/div_test.cpp create mode 100644 test/unittest/ops/eltwise_test.cpp create mode 100644 
test/unittest/ops/expandims_test.cpp create mode 100644 test/unittest/ops/fill_builder_test.cpp create mode 100644 test/unittest/ops/fullconnection_test.cpp create mode 100644 test/unittest/ops/fullconnection_with_axis_test.cpp create mode 100644 test/unittest/ops/gather_builder_test.cpp create mode 100644 test/unittest/ops/gelu_builder_test.cpp create mode 100644 test/unittest/ops/hswish_builder_test.cpp create mode 100644 test/unittest/ops/layernorm_builder_test.cpp create mode 100644 test/unittest/ops/lessequal_builder_test.cpp create mode 100644 test/unittest/ops/matmul_builder_test.cpp create mode 100644 test/unittest/ops/maximum_builder_test.cpp create mode 100644 test/unittest/ops/maxpool_pad_test.cpp create mode 100644 test/unittest/ops/maxpool_padmode_test.cpp create mode 100644 test/unittest/ops/mul_builder_test.cpp create mode 100644 test/unittest/ops/onehot_builder_test.cpp create mode 100644 test/unittest/ops/ops_test.cpp create mode 100644 test/unittest/ops/ops_test.h create mode 100644 test/unittest/ops/pad_builder_test.cpp create mode 100644 test/unittest/ops/pow_builder_test.cpp create mode 100644 test/unittest/ops/prelu_builder_test.cpp create mode 100644 test/unittest/ops/quant_dtype_cast_builder_test.cpp create mode 100644 test/unittest/ops/reduce_all_builder_test.cpp create mode 100644 test/unittest/ops/reduce_mean_builder_test.cpp create mode 100644 test/unittest/ops/reduce_prod_builder_test.cpp create mode 100644 test/unittest/ops/relu6_builder_test.cpp create mode 100644 test/unittest/ops/relu_builder_test.cpp create mode 100644 test/unittest/ops/reshape_builder_test.cpp create mode 100644 test/unittest/ops/resize_bilinear_builder_test.cpp create mode 100644 test/unittest/ops/rsqrt_builder_test.cpp create mode 100644 test/unittest/ops/scale_builder_test.cpp create mode 100644 test/unittest/ops/shape_builder_test.cpp create mode 100644 test/unittest/ops/sigmoid_builder_test.cpp create mode 100644 test/unittest/ops/slice_builder_test.cpp create mode 100644 test/unittest/ops/softmax_builder_test.cpp create mode 100644 test/unittest/ops/spacetobatchnd_builder_test.cpp create mode 100644 test/unittest/ops/split_builder_test.cpp create mode 100644 test/unittest/ops/sqrt_builder_test.cpp create mode 100644 test/unittest/ops/squared_difference_builder_test.cpp create mode 100644 test/unittest/ops/squeeze_builder_test.cpp create mode 100644 test/unittest/ops/stack_builder_test.cpp create mode 100644 test/unittest/ops/strided_slice_builder_test.cpp create mode 100644 test/unittest/ops/sub_builder_test.cpp create mode 100644 test/unittest/ops/tanh_builder_test.cpp create mode 100644 test/unittest/ops/tile_builder_test.cpp create mode 100644 test/unittest/ops/topk_builder_test.cpp create mode 100644 test/unittest/ops/transpose_builder_test.cpp create mode 100644 test/unittest/ops/unsqueeze_builder_test.cpp diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..335cc7f --- /dev/null +++ b/.gitignore @@ -0,0 +1,20 @@ +# ignore .a .o +*.[ao] +*.so +*.ms + +# ignore build +build/ +libs/ +obj/ +_ignore/ + +# ignore schema +jni/include/schema/ + +# ignore vscode +.vscode + +# ignore runtime file +deploy.bat +log.txt \ No newline at end of file diff --git a/BUILD.gn b/BUILD.gn new file mode 100644 index 0000000..36de2f9 --- /dev/null +++ b/BUILD.gn @@ -0,0 +1,28 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build/ohos.gni") + +group("nnrt_target") { + deps = [ + "frameworks:libneural_network_runtime" + ] +} + +group("nnrt_test_target") { + testonly = true + deps = [ + "test/unittest:unittest", + "test/system_test:system_test" + ] +} diff --git a/LICENSE b/LICENSE index 29f81d8..4a45986 100644 --- a/LICENSE +++ b/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -173,29 +174,4 @@ incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/OAT.xml b/OAT.xml new file mode 100644 index 0000000..8c1351e --- /dev/null +++ b/OAT.xml @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + diff --git a/README.en.md b/README.en.md deleted file mode 100644 index d233708..0000000 --- a/README.en.md +++ /dev/null @@ -1,36 +0,0 @@ -# neural_network_runtime - -#### Description -{**When you're done, you can delete the content in this README and update the file with details for others getting started with your repository**} - -#### Software Architecture -Software architecture description - -#### Installation - -1. xxxx -2. xxxx -3. xxxx - -#### Instructions - -1. xxxx -2. xxxx -3. xxxx - -#### Contribution - -1. Fork the repository -2. Create Feat_xxx branch -3. Commit your code -4. Create Pull Request - - -#### Gitee Feature - -1. You can use Readme\_XXX.md to support different languages, such as Readme\_en.md, Readme\_zh.md -2. Gitee blog [blog.gitee.com](https://blog.gitee.com) -3. Explore open source project [https://gitee.com/explore](https://gitee.com/explore) -4. The most valuable open source project [GVP](https://gitee.com/gvp) -5. The manual of Gitee [https://gitee.com/help](https://gitee.com/help) -6. 
The most popular members [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/) diff --git a/README.md b/README.md deleted file mode 100644 index dc3cc36..0000000 --- a/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# neural_network_runtime - -#### 介绍 -{**以下是 Gitee 平台说明,您可以替换此简介** -Gitee 是 OSCHINA 推出的基于 Git 的代码托管平台(同时支持 SVN)。专为开发者提供稳定、高效、安全的云端软件开发协作平台 -无论是个人、团队、或是企业,都能够用 Gitee 实现代码托管、项目管理、协作开发。企业项目请看 [https://gitee.com/enterprises](https://gitee.com/enterprises)} - -#### 软件架构 -软件架构说明 - - -#### 安装教程 - -1. xxxx -2. xxxx -3. xxxx - -#### 使用说明 - -1. xxxx -2. xxxx -3. xxxx - -#### 参与贡献 - -1. Fork 本仓库 -2. 新建 Feat_xxx 分支 -3. 提交代码 -4. 新建 Pull Request - - -#### 特技 - -1. 使用 Readme\_XXX.md 来支持不同的语言,例如 Readme\_en.md, Readme\_zh.md -2. Gitee 官方博客 [blog.gitee.com](https://blog.gitee.com) -3. 你可以 [https://gitee.com/explore](https://gitee.com/explore) 这个地址来了解 Gitee 上的优秀开源项目 -4. [GVP](https://gitee.com/gvp) 全称是 Gitee 最有价值开源项目,是综合评定出的优秀开源项目 -5. Gitee 官方提供的使用手册 [https://gitee.com/help](https://gitee.com/help) -6. Gitee 封面人物是一档用来展示 Gitee 会员风采的栏目 [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/) diff --git a/README_zh.md b/README_zh.md new file mode 100644 index 0000000..720bf3c --- /dev/null +++ b/README_zh.md @@ -0,0 +1,26 @@ +# Neural Network Runtime + +Neural Network Runtime(神经网络运行时)是一套面向AI领域的运行时部件,适配上层AI推理引擎和底层加速芯片,为端侧AI推理引擎提供硬件加速的计算能力。 + +## 基本概念 + +在开发前,需要先了解以下概念,以便更好地理解全文内容: + +- Native API:Openharmony 面向应用开发者的C语言接口。 +- HDI:Hardware Device Interface,硬件设备接口,是OpenHarmony中系统组件与芯片组件通信的接口。关于更多HDI的细节,请浏览[驱动子系统](https://gitee.com/openharmony/docs/blob/master/zh-cn/readme/%E9%A9%B1%E5%8A%A8%E5%AD%90%E7%B3%BB%E7%BB%9F.md)。 + +## 运作机制 + +**图1** Neural Network Runtime架构图 +!["Neural Network Runtime架构图"](neural_network_runtime_intro.png) + +如图1所示,在OpenHarmony系统上,AI应用通常要经过AI推理引擎和Neural Network Runtime才能对接底层芯片驱动,进而加速推理计算。Neural Network Runtime和芯片驱动直接通过HDI接口交互,Neural Network Runtime将模型和数据传递给芯片驱动,通过HDI接口在加速芯片上执行推理计算,计算结果通过Neural Network Runtime、AI推理引擎逐层返回至AI应用。 + +通常,AI应用、AI推理引擎、Neural Network Runtime处在同一个进程下,芯片驱动运行在另一个进程下,两者之间需要借助进程间通信(IPC)传递模型和计算数据。Neural Network Runtime根据HDI接口实现了HDI客户端,相应的,芯片厂商需要根据HDI接口实现并开放HDI服务。 + +架构图中每层功能简单阐述如下: +- AI应用:借助AI模型,提供丰富的应用能力,如:图像分类、人脸识别、文字识别等。 +- AI推理引擎:为AI应用提供模型搭建、模型优化、推理计算的能力。 +- Neural Network Runtime:作为AI推理引擎和底层加速芯片的桥梁,它开放了标准统一的HDI接口,不同的芯片都可以通过HDI接口接入Neural Network Runtime。 +- HDI服务端:HDI服务端接收Neural Network Runtime传入的模型,将模型转换为加速芯片驱动所使用模型格式,并调用芯片驱动的接口执行计算。 +- 加速芯片:加速芯片通常能够加速AI模型或者模型中部分算子的计算,提供优于CPU的性能。 \ No newline at end of file diff --git a/bundle.json b/bundle.json new file mode 100644 index 0000000..9f69a3e --- /dev/null +++ b/bundle.json @@ -0,0 +1,50 @@ +{ + "name": "@ohos/neural_network_runtime", + "description": "The Neural Network Runtime that bridges the inference framework and the device accelerator.", + "version": "3.2", + "license": "MIT", + "publishAs": "code-segment", + "segment": { + "destPath": "foundation/ai/neural_network_runtime" + }, + "dirs": {}, + "scripts": {}, + "licensePath": "COPYING", + "readmePath": { + "en": "README.rst" + }, + "component": { + "name": "neural_network_runtime", + "subsystem": "ai", + "syscap": [], + "features": [], + "adapted_system_type": ["standard"], + "rom": "1024KB", + "ram": "2048KB", + "deps": { + "components": [ + "hilog" + ], + "third_party": [] + }, + "build": { + "sub_component": [ + "//foundation/ai/neural_network_runtime:nnrt_target" + ], + "inner_kits": [ + {"type": "so", + "name": "//foundation/ai/neural_network_runtime:nnrt_target", + "header": { + "header_files": [ + "neural_network_runtime_inner.h" 
+ ], + "header_base":"//foundation/ai/neural_network_runtime/interfaces/innerkits/c" + } + } + ], + "test": [ + "//foundation/ai/neural_network_runtime:nnrt_test_target" + ] + } + } +} \ No newline at end of file diff --git a/common/log.h b/common/log.h new file mode 100644 index 0000000..c75b325 --- /dev/null +++ b/common/log.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_LOG_H +#define NEURAL_NETWORK_RUNTIME_LOG_H + +#include +#include +#include "hilog/log_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef NNR_LOG_DOMAIN +#define NNR_LOG_DOMAIN 0xD002101 +#endif + +#define LOGD(...) HiLogPrint(LOG_CORE, LOG_DEBUG, NNR_LOG_DOMAIN, "NNRt", __VA_ARGS__) +#define LOGI(...) HiLogPrint(LOG_CORE, LOG_INFO, NNR_LOG_DOMAIN, "NNRt", __VA_ARGS__) +#define LOGW(...) HiLogPrint(LOG_CORE, LOG_WARN, NNR_LOG_DOMAIN, "NNRt", __VA_ARGS__) +#define LOGE(...) HiLogPrint(LOG_CORE, LOG_ERROR, NNR_LOG_DOMAIN, "NNRt", __VA_ARGS__) +#define LOGF(...) HiLogPrint(LOG_CORE, LOG_FATAL, NNR_LOG_DOMAIN, "NNRt", __VA_ARGS__) + +#ifdef __cplusplus +} +#endif + +#endif // NEURAL_NETWORK_RUNTIME_LOG_H diff --git a/common/scoped_trace.h b/common/scoped_trace.h new file mode 100644 index 0000000..650503e --- /dev/null +++ b/common/scoped_trace.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef NEURAL_NETWORK_RUNTIME_SCOPED_TRACE_H
+#define NEURAL_NETWORK_RUNTIME_SCOPED_TRACE_H
+
+#include <string>
+#include "hitrace/trace.h"
+
+#define NNRT_TRACE_NAME(name) ScopedTrace ___tracer(name)
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+class ScopedTrace {
+public:
+    inline ScopedTrace(const std::string& name)
+    {
+        m_name = name;
+        HiviewDFX::HiTraceId traceId = HiviewDFX::HiTraceChain::GetId();
+        if (traceId.IsValid()) {
+            HiviewDFX::HiTraceChain::Tracepoint(HITRACE_TP_GENERAL, traceId, "NNRt Trace start: %s", name.c_str());
+        }
+    }
+
+    inline ~ScopedTrace()
+    {
+        HiviewDFX::HiTraceId traceId = HiviewDFX::HiTraceChain::GetId();
+        if (traceId.IsValid()) {
+            HiviewDFX::HiTraceChain::Tracepoint(HITRACE_TP_GENERAL, traceId, "NNRt Trace end: %s", m_name.c_str());
+        }
+    }
+
+private:
+    std::string m_name {};
+};
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
+
+#endif // NEURAL_NETWORK_RUNTIME_SCOPED_TRACE_H
diff --git a/common/utils.h b/common/utils.h
new file mode 100644
index 0000000..7430ad0
--- /dev/null
+++ b/common/utils.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NEURAL_NETWORK_RUNTIME_UTILS_H
+#define NEURAL_NETWORK_RUNTIME_UTILS_H
+
+#include <memory>
+
+#include "log.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+template<typename T, typename... Args>
+std::shared_ptr<T> CreateSharedPtr(Args&&... args)
+{
+    std::shared_ptr<T> tPtr = nullptr;
+    try {
+        tPtr = std::make_shared<T>(args...);
+    } catch (const std::bad_alloc& except) {
+        LOGW("Create a new shared pointer failed. Error: %s", except.what());
+        return nullptr;
+    }
+    return tPtr;
+}
+
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
+#endif // NEURAL_NETWORK_RUNTIME_UTILS_H
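Note: the helpers above (the LOG* macros in common/log.h, NNRT_TRACE_NAME in common/scoped_trace.h, and CreateSharedPtr in common/utils.h) are intended to be used together throughout frameworks/native. The following minimal sketch only illustrates that usage pattern; the Foo type and BuildFoo function are placeholders written for this note and do not appear anywhere in the patch.

#include <cstddef>
#include <memory>
#include "log.h"
#include "scoped_trace.h"
#include "utils.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
struct Foo {
    explicit Foo(size_t size) : m_size(size) {}
    size_t m_size;
};

std::shared_ptr<Foo> BuildFoo(size_t size)
{
    NNRT_TRACE_NAME("BuildFoo");                            // ScopedTrace emits hitrace points at scope entry and exit
    std::shared_ptr<Foo> foo = CreateSharedPtr<Foo>(size);  // returns nullptr instead of throwing on bad_alloc
    if (foo == nullptr) {
        LOGE("Failed to allocate Foo of size %zu.", size);  // printf-style, routed to hilog under the NNRt domain
        return nullptr;
    }
    LOGI("Created Foo of size %zu.", size);
    return foo;
}
} // namespace NeuralNetworkRuntime
} // namespace OHOS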
diff --git a/example/deep_learning_framework/CMakeLists.txt b/example/deep_learning_framework/CMakeLists.txt
new file mode 100644
index 0000000..c3dbadd
--- /dev/null
+++ b/example/deep_learning_framework/CMakeLists.txt
@@ -0,0 +1,29 @@
+#
+# Copyright (c) 2022 Huawei Device Co., Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# CMake lowest version requirement
+cmake_minimum_required(VERSION 3.16.5)
+
+# project information
+project(label_classification)
+
+set(CMAKE_C_FLAGS "-pthread -fstack-protector-all -fPIC -D_FORTIFY_SOURCE=2")
+
+# If the CPU architecture is 32-bit, -march=armv7-a
+set(CMAKE_CXX_FLAGS "-pthread -fstack-protector-all -fPIC -D_FORTIFY_SOURCE=2 -march=armv7-a")
+set(LOCAL_DIRECTORY_PATH ${PROJECT_SOURCE_DIR})
+
+add_subdirectory(${LOCAL_DIRECTORY_PATH}/tflite)
+
diff --git a/example/deep_learning_framework/Flowchart.png b/example/deep_learning_framework/Flowchart.png
new file mode 100644
index 0000000000000000000000000000000000000000..d6018c17a9116b72f2ebc10e38e39764d2ec0058
GIT binary patch
literal 126482
[126482 bytes of binary PNG data omitted]
zoFr}JWvCvr`qYX<&-P}}Y7mv2zu8c!()Wx+>N+!+o~|{R#&3~T9y4Sf{eC^J7idl6 z=#o&pZ|>b#@IG{q-&9W2Bxx#wfgqKz)&M9Hvv^91T%*2>PI2x@MG(TEziWBU z5d*Z3xX}@(jot;sBD0`zAr<6Hdu!F^8C$HU1Nld@px51G_yM^6(0tYA1+_s_Wl%*2 zxd_MR`F-yx?z_XLy4s5g9UOo7UQ$UDo->WoJ0gJ=a^ z9`N#1D>D818iekvLR!%=EoOzFlF0}E0E@~lpMG2@eHwg~2YbsWA?|}`AQjp>z8=Rz zxzhvTa6L!=7+h1WdL9zG{gj2sw~yYZ#|SSQmnC+Tft1wFhs-T&hZU4Z$BlrxX-Y0^ ziR$7eOhet|a>}Qox3|*IjI;qF9MX#DcggVhOvos-x!W`#1=U|`U0NbHRWjbw35LJ zex;6P7<1aC8zz@}waaaagYtI8o8Y-t!Q93Ncn*9?gPJ?4P!}RN*LrQWEN;Tl2BO{A z@BDE6AlVp#`ESUcA=M06*x2M}F6oTm|6^PP1Mwnp_eXsU!tS=gWr=(S83TuTFjm4{*JD&_3Pd+;&7(JZ{VSl z>UCz)nC-Lw^U>8~D?T(X-!)>UqQtLue@msgAt!TOJIf~c0u$a2-X0`grE9A6u$qBx zvCE99zZUx3AX8HrlQKdbJ?)#FCL5+@q`Y4LfVS0LA5lT)m-et2d`g zHE8Re8~J)5G96~#_-@@{ch>9cEoUnz1>DU2@bm@6*RLRiMQHnw{c|j%Yww+IIR)zN zqwdGp|M=Mlb-X?2S`UgEyEA9=b6^t84iR*u0MEIed=qo9@bjiw;o+J1WC=D7=0n;L2@;19U)H6C+>a4A>kr-^E#*-PG}^}$ zgf(u(@tpno3P6j~5z7g;U7mz#wFuR&IjA_Re|pRWIivB`1Df4vz0Mja=c)#bQ5H_y z0yVr@W{MPKl~V)8LKx}k#o?W04bIacOrgae#L$SMnJtAgnYoWrGSxmWJrYwJ_x#r; z1t_u1DXeP=J=eC>hCgqdjtRDqpJ>}wD5GZ4PZK^aJQ1^!Mf7yo^cvD)yRP?@xj{b8 z*kh_y&zS{xyGC3t;~VEA$&{wkWsz93#$o!axn1`Bnwu2tPn%Vt zdM>cS@0N;!HpGH%KXwj`m9VT_+a|I=cifQcKp981jRBm9Jb!xAp%(MbWzs9eFl7Pdk9M9}?Z(XOYkJ8X<5( z{lalDT4=`{#MwBZBKeIilz>!r`Yidez!vt^0`3IcsV7NcH#v#7i|EPE^5sPIiViv4 zb#buez3`~A7$@Xi!LDoa_nl7$4>uu%*A{K^-D`qKihSZ&if4Iub;~l6ly|G zTfsl9*1@w@-eX-i%0KqPIo%!$?nv1hc!J(L*i6@!VQddnGVz-b5#2;&bf&;xSBf!d z{%RRDBykB_%(iv7gsXU;2=57rKo`}Y(kVJi(S<7M4Fd~C{OH?V8lQH1&o57cF#T3H z^@p0k;z)8#5&w52KynsMEQq6^ppeFEPr};9CCA0h&2HQS1^5jEH{uC{E|Aho?*D&l zmXPl=mjCM&tbohW54W=w$08wR>6p;hgK< z9A?&6H?NX9g8tJtzNZn?_}XLw-NXpUC-HWN^phnv_`ilBW>39JjmyEm)~NqdA+iqDRI zU-vT3Kc$kRkCYs?qL$!zyJT@jg?S)_VCQ!nQYH)!8dNs+?!{h7QSnGSikQ0ml4f@i zEh}%}al6fyq%@8me}Y<2U(DvpBj#~{EEFvmIedei$58a6Yc8Y7!JNKsVb%C0rP)E? z0->3jv3f}kKdOBGlI#%Xyx&lENw$;0{V|0GxLnD=MEfnZEk`1uMhp^rAxF`8_Lg$6 zQ-0$(WZA5$Z8S=|*d?ED*gP%Chs0a&w-hG977jGGLMm&{y%m)Am)`y)EH9^y@BLp+ z+nb@ohRHO zUp&J!yY7wof#B53*F z#~Ob6`m1jvKM`ID7e$-BTcD-vhk-uZ3q9inmo&SxTjo2+KD@|c?n(Z_< zQdWNWloBb_23P1`nXkdvLO*7mpvTfV(SNSqiz&zxa%cko3ioh`oEdQ(j8Qb9@kC^%zrjO?PTt{cAqCB~tj z&z4v&CP_+5U>4><8-@PGmajn;nUQ48+%N5rz1c#R&pT5B>c-?nM>L+94ZuKq(#RO<(uwa$f`qHz$z)d1MO6PQ}%u?b(@O9{@p; z@pzG$6HWvkxbPp5t`4tlFB#W|KiVFBSR3jonVzTclN@o&gR9~Np*9tx_TTAN+s7~! 
z|DFsS{-izTvJA~G?HFwF*tHujV1BU_1#9)L_BkyyS5-WX$BylsCNrSC2WpH{Ep$1L zxyerj7s1+{ZNMM9U@*F0<3QHwA(nXHn2#;gCKjB7GuJp52L37HGP*SgIVAz4Ul__S zkdW5m!)do(CvND-Z-Xj;xDNekYAz`xVAWtH{Y^(UpRSD@QT`mNGAQX5E7Q;Q&&pd2 zvq5UA*o>off0K(MW*%NmULN^aCeBw~a03tY;76(KB1Cm)WdIMz6!9g1d_fI9sh+>| z^+j(isJsNaXsWF&*x2XO0j|>0xpW_s%PatHA_3AfWpXS=H3a{G7^W*_adAezBUf{% z|BuF+EUO)mK|Ati7YFn1c}axEuQb}nY5vifHVPL~MzkNgE6mLWGZ}~uh>Ak|+B2I3 zY>i>%lu!Yr9f?$g2r!=QD}VzQ9wazTEduWC=DW{3Y-3 z$HvCCqGc0)qXn1ofI_ez;=a7zS41KaN}CWZMcsRK+#Q&1aB*ZuUg(+)2dV(}^78Vn z9Rnq$Z+rlb^lW z3FJ4|)-s9iwXJ|!`g6_LCgPNC-etG|T3=gZvz#K!x{sJcuHrO4`x0 zSuZdNzzWgRM=a!(dU$qY9f&m34SPkAQxaL?+YeEU0cP4`%8i+s1jbS+zj)%B84XZc zn%w>wzoJJ6jrhQi*CZ(-gWm|9(Yy|90+G7jN_majNZ0A>x`*221fFu?ssm=a)nK0Pf%Y%hk7uc(a154ThIGQ*3ed;FV>M9DZ2v}?i3RiC7 z6$hfvu(J3k1%d0Rjc=Nret6wnlw3>?9HAdK0_Y8P>k5M?N8uKMwR(qH#0XxU{VYsYt84E?FVfZDauSoFuJ!s6 z575LdWR%GE1P>%q+Mvk7XBT9(0X)L)z9ASwTpcpnUfj;QzduC2tJKB0^e;e7j*!BI zkZXPW9%Xiq+Z6cM(amk3%D7j@bXE@S-wP&%53sQ=@?Hic-nh~Yv?`CCgR-00OJ2O# z30|X?SMmc!5z8ma8L%_LQSC;7S?>{btoAmR&>Rz2#~SeW_>5xmo-`}(%P4OLcEO_) z?M=0A_&xZ#lipRRnieC5bv^+bM8YiN5&}+prQ+EnLZYF(djuXI@4_-M1kl3mzX#@* z`h3EBtMVjs|NI#T9vsXD$hy1P`S9phR{$hz6GpcL4NANxkw&qSRYEEWGdKXlh6Bk+ zzH6dhx8aPXqNew2Q+bkC&v(`p!6hC4z}unMZycGa>FLACubIG4YL-5Ck7bgQ55RQ6 z`ar{Fb{tHa#CdsnbygDR2SPx|>kvd}n+v(SUQJ&_o#+1kJ|Xo_TGcN=<)rfH#OVp) z-IK}R2z{l8I9ug(cVx-HcY^!2(%M9%stlY=j_W9P49__X1!2g;6#~=s!68IQH1OZQ zbSp>of!(TC?{f%=2L0ziXr>daKK9b$wvcETa3JvF6do7g1mM)o`Ax6UMKzQ?fcMmB zXyk~;c7-jg9_8~;n{oEQzdzkNo-MIlZ*eV#HG5btqesjy95|u2S?WW)`SrCzc3tUZ24d6JL8wN$(V0``hm1VG( z(gP^((r<2a$pZM-fzXKm()|B(SkBq?YNubX;-0IST+kpkUzeDhUrtexlL9&fnd)AK z#O;}O_sx^DA`_b*nsVZH&wKgS#wRJcVMJU}T57H>*lp@K?YtxN`dg9AE zwEUlt+hWh49T$>cMhIzp8TDt*d4GmD4QdA01cy}#--SMfHV1z9*l#C|v(^J$!)G7| zi=NeK6V&)mQHL|T27QbYDe2Zgi)<`zJI|tKA*phDBN{oeV8;{6Za+e^dU(EcUxc!n zfAEuRvIL@bIKyh(YW93gNJksGr87cv;V}0;#oUy>{8n&cGt`0MIi#?%i@vfe=g|x) z&*VHI4)2!JPGFa!{i%At8NHSIkV`=hZRPEL4|V{{5jCv%y*f&QRp&^;Kp**gEjF7^ zpXu1AtdMEpFCd8r4xihQfbma5_{4eIxU0!7FR#P!;W6`lFBZ7$hO5}BBpZVTRTf(L zCc&47;%SxiQMtLZQ(x)_5p#mn+&qKdRB&7*rj21CGu;K_xEElr_-^4}?Y(vl9N>5= zb(P!k(K#q5^56P?i)0GxAE@CPOBKfw!_)5{b1~CW#`wmCKQ!A|#!`kRp|*D?j7dwd z%bAc0q0>0WvWmZeR5~G`;X!BJD% zW~lOC7(EQ`y3FFrnsC#Dbx{x;gxv{wSz0BWr*-shnnH^-?(h@k`mgtsiFao}!djM1 zU?;v>n49iiix4!nEFpYjW=__xbvai0DCuze6wv)j!O(IGL* z!1Pwk11%OGzMb$J9_6&6a!TKSBtHCwFfXTKH5D;w?!W;^xozF>$&G;ds6$8SU?IFA zray8Tk~CNYCbu4tJnKVTtHu3Xsjy?44MmdT0W=T6UEDurW4*1W(qqaM8H>GF^Lee z_kmc~gJ1C7*F4h)h=zsAoK23nRlL?@p9_N0^7t_A4%@blJ&{v|R3r?wp@FQfsk)io zz<%p1c2Sh;NS#Z+p^9-Gq0jjr82Lg zBH-%nMclZ8CL^aP+BUne`If{PH?w=2SkqyaAf|VFc`AoIB0Z8uZAM3f9l0(uaBo#< zb`aw$k!kgg7TDL3PifCY3%@a|p9NlmVw!7VYt0$OCC*l3eep>Q9(HG9Bx|`%)oED; z@G3uP*#iXypiqUFAvQq%VjIfNHolokvzjKb0mr(2v*{lKUwwIV=>A%No5cSXH2HFY z`Z4|7R#}0*RURm47(%i9n={Lch2nGbUSq0B#g*%qj<-kqljSu4vV+}Q*Ml*;Nv=w< zh80~I^9`k+sWonKfiG8{%Wbw2rBP1muT@%r+aS&eS`R2}*FusnrFOa9smkL!U~TBZ zFyV~8SVQ@`#-uH2?saWKy%ZlXx!qd5cjWvlIn6qA?4yaRDg@t#J65r2LnvZ0D`i_lBEv&_jip+$4l%b!586eM8=uOIzUm)t zc|2XvQ$p3Qc?N-p$ph?u8YIP+g(4sJ3p?bbKvyW0)|>cR9X-kmSW7?x-n4 zy;OtNqX5L)H3~LaA3i3VF(Y9^KufZ@t=J(OIv5+9& zQ8@>^apv9TY6+tJpMAF4n(p2{rSQ1a-2I|TR-?#m=(d)97|v0%sd)}@O>v&vIjX9) z6$BjuXK_EmfltEf?UUZqVZ5g365j%)`qQ{n*U=)$Gt$y7v&lUPurai|CMIG>=d%Uy zIRf~Iv|HVnQ+Kz~iKr%WfBqy$EhBX0*>f85s!L`}M@g#ZW)%f(`n~*FaWY^C;bVWe zv0jQw@gn4yoFuQPdB1P$RHp_;-vGoe5Z}e&NUo@<<^Q@AO~Jwg39R~3;p87TIUj{* z9l-MVg-MQ@!(;$yER#PG7d!%p|6OUY9kuEJHn6-P`-`D4RMO&X0sptV#SuJep%@gP znfQUnp*q{+4~bl8y;xC)KmA9S7wAmre@%nrkt}N9zNo06B+@AF6v4!UKwYH%ui}F4 z^Jj-4*w=R70FR|Bg0VX8Z*EhJwvgc1{cUuv>FH@a_HR(Bm_(cS91#*yfz6kfTsWo( 
z!66~gshCK>K~byllIqfn!aUOHd?fzEpTze>)nNm_6T39No+6I-At_t5J=6^}4HsNZ zZu3-ess}wj=HYSZAg#a8=cq({Cb;cfa;D90hZD(54MtCWNAC}hB@ggT|2#zUI_(Qb zql6c>T&xRvD}4qA2JJ5$XSq^~zCm&@wnK1vdq}>6Pw0-b6>gG6<8$Hwex_Kf=(9n< z#-n6`gfG9y2DoKkeYZYov1kqyP6x3$JX%JRR>ry>X=-ai~>J~t@d5DvJ#kPLcHAa5o3ne;YHK*;s4Lk34a$mRu+YYaW$xZ_2`92%p zzvm3W+kM&PYzgG$Db9SMtE3ST#?x0#3v99|VOy|d9k2ZIN_;O;+T=c(ad6i^Fgtw1 zI-#?T(5#j~TAW^0{W4Y(BfZCnszroQ@N@tL;Mx?En1gMoCa<~OReTuXH5Rg4R|0X| zrZBV82~CeXET+$>Jwx5?)7D*R(Rd8PD`G;b{t zfh0ny9cs+-7M+gIq9o2P4YOG6rmHune~;Tv#l|vQBjn9YLOq@^-YE9i+~=O0J~g?| zK%Dp_T3V^x%LcdLo2)ZmJ43WBj`#ePv|#%b_L>V;Yn6Bvuq?v&Em{MRpN=AEd7`Dl ziHY0Mj396Vk8WP)w>boG5BWIGZc>gqlc}owLkB|O#B2#NU5(jRI%I9+H+678BaA*X zvs5f5(D(-#->Q`2Ny-wzL$V@b>aOZ)CsWQj(Om(tMlCHTsPFSamZpKGKtf{TWogoS zPIf|YA=OL}gh+C**z6-&jdMg|%BMmq_LmtxUOglMj;N|>4~%ms#(WF8@j_i{D`_k# zk{@T<3yJ8L=rglg4_bXDstnC43@`ZmlsM9!+I1w&IZK+J+k;3{oP+xZ`_nZw`9f6G zw3LEzR`y9LJooRC~obBdL46E;SjOMXs*CG4Jum5h8+epL!Wq7k3Dn=kT z98-0vF)=oBce>si_4a6*Sh&TltIS5s%Y*RtbOU7~+q90HU}bcIy}d!~)@D6`zXG=9 z;Rt%Z-k`~nD#921;aF?lk{+Sm{{@ICR=B7A=P$!N;hIV> zlXW+5oigHSTk<2#XA$iF`<-aR(^mFmLU{WAp_*(0frN-BRPgvJxkIwkzww}A8S@h& z^ETfT>R45K;A_u8JyHtq-cKVtO|yrET$2)(Xz=Lbf&2S3nh7DZK~MM_>X!oRk_HD< z)$UZ#+0j%5w-Bps_iyXT<99RXJ~o?azpZA%75CHV%`j$(Q)O$u%(&ba?F2SWKkQrX z<>_#KhM#mk&NzUsS$eVJ%-DC2ow=m#N^JE$>vE7x`6u^hjeI{Xy}Gah>qECH;c=A3 zW%@dqu!cX?-XAJjo)#vJ+j4dIg|P+JOz~nUkB>-y{9ce-NjTh(3c~=74|C<-M7X0e z=ZWy<<0x)$ynMWe&X>7P5vJufM{|8krg>%Ys-iU(5V&3#>vQpZo!V0ahqnZXmrd)lvr(d z2Eg>RLZTr9g&FwuM!^7e_ea^+%5Q)|Zf-HKxR{32s2v_angReqI3GZo+I9zNighGe z7#Mn*9Cyj6sY3u8bERsSR9!U`&mZedtf{>qk)zvg7MWB!WHQL@FSRKb%O z>9uX+vy>B}u(g1|b^^ZJhP+AA7CiI|q8`0b2|Ljn?5%Vv%R0OFz0K{>EM|Mloy5ht zz5~-OIS#9XxOktZ9%b#a8d74jO|Z{3zaSA4ATeOTA@Nr69q@fki;6~b|HWJ_d03%8 zUEr=-emy!+q+bqs|G>-hO%{*87SHROp6P4?9#Y0c1-%}-H5mi0c(S;EAJWg?&;HNR z!F^I~%@^RvjdWB+g~pvlP}jAyX&3cQJ=o;^{)kKAJDWGoe>9~Fk(z%z)htF6#QMFx z6i6PD98KD^j$Y(`lxAO_>hL_-lEdkD<89H(RQqCEUZx!Ca=zk{9elmBe#NVDN8-YJF-z@^A5=M9Ed(Q|U zoo*5<2<8~K6_OWhzZ|VLdrNyR`oCv|GE+kFs}Amm#|^ca#bvj@6x^|1--bDT@@9~b z&)fZ8A!sPyjQVEQI_>QW`;m%#ddw1oHui!yLY z4mn+-3rlp>fjsUSce(dy7i=afyXp6K{83`qDN^M^iKtqe$IKIE0LeeeE@ua=Tw#io zzDA;1dM68Sa@s<<^u!sD`)9g-lgulIg!rS&fPj^mxVE^8m$XNBQu+2n5^4SW)<=N= z*Z@1j>D}N5Tc}>Nb8RaHLo)Z?dFMoa=$dhzl{8!nt6%KVOfzRT3Ab2D_8Xg}njxPd zq6EErSB8ns(S`+Ci!w(6=F&@ywwOQEg(|bu;)YW562@LCw#nbB*rU|~LOrBChc{^y ztiI>`7c4|MRE?6*=t1x4gGm&)=$F7I#?k3&TIX{j4Ur?sgDhPhoc2= zBDOEG7A&!~8*>eKJT7#13sR;_KhCT8rHca(jMKZp&0pYWgw3p&&Za2M`IM39Kga>6 zV;G`atz(wbM%+U{_f?}DDt|0K$_tzGFD?YiN{N0CVjcItK>*j;m} zr9p&W5X`q*DxSdkFx}Se@z+kpSwrK$*_Lgc-%$+@@=3E#gMqo7BE}1y0y8Ej5Sub~ zbY>xHKj&b3VO_iZE`E|E3B@4)+vNn}*bg;>TeG`$cQa&#H6ey-xn)a{zOjqY>|W9_ zW7@uqSvU=Y(rZyzcYj~K$_e_AvYRY9L=%g^#qC6%tZBz*mkbtZF-=Ui$iV4k zO=-v=_O0F9STx9t9%~W;d4{}w9#G?Oahdvc}lfmfjD)ODVRB6|@8KvS^wqNUCc&Q=*& z#^y-8Q1l$I8V|u1;8`$2Oz@NI&iYc-@kqj1x7;+>ujiUnk2pRK2>_B_EdU%!hyycPxrkJh4Lz+Tu`|vGq35VGSsg`X@YJ7=cjW6)s-YN z%Wk`F zX`R9dYIQs7NuXA2$~1bM0Astkv zlfm*;Y(j^4o}Sh)>o`{(lIU;CtJ9u5H@TcWZG?9arqlVrn)Sx$!8kNdcP}rRsvC(+ zoH+%b+aS|T)(2l;Vn_w#Q4yKnFa8L}#G@;KRkVJvBUb%&LhOJ zn;-Ab{a8@g-SckhIQQeoY7Y(Mx8da7ie0IwH2PKk<=HRGfj1DM#^uba^wBe@+U1b+ud8(h84s==cpyHii10u6Bc$zQoinp#uF7cZIZdT z1fgD1y|W-o^L3seYs_Ny3}IE;MUf6pFqXS<)L$SNDZDY2NBMAQCZEZ9Shf9|5U|=)~_*;q*%03SgH;Npzug8 zOq^Q10SjM?NY8k)QZFK>C7ZOSc3JDOdZNCua`68WevAo2nNmUT5sFv*MD!S_e?Y}4 z5%Hu82bs9bW%%p>J2u%?WTnjR>4xo^kGogCZnb>K#*5b?Twv8sUjx@xLm=tWFx}Q? 
z1btx7QLMsU^7Jb;&bWk&pwcSw39~r%lh{HOrzQ7r@OfvuD@!aD7_}8Wc3FJ}Cvb5Q zrbwy!<3;J`i^lpk?Ku$<*rAz7ANvXpt&=UAUZ=%DzQ)_2B;&6l1$5v!IXQtdDXU7w zNT1pZ@i|ojTF&iSUf#`#n;vW2@0L*4*54Q<{o3r|HN9%wFM=PT+C)sC4+&4@BI`4e zG+HSNr`Wqw@p7B7Ry0hTzRa0T#b?F3v#NeOUqI7Ih@Tihxv$HLzPa-Rh2u%E?*V2< z6M!bviw%>#~-paC8ONPwT0 zYbL+pVt(bF0&{Y5I^v+MF~5iteHZf+xjGr1#d@6r)z;Q-@9qWxDpo+{3Tn&++;oV5 zj7|N_&p@l$aTn%on-^!E;2IbRaD<3XLLx4?aipBi5i$xNdx5k(GlNl4QSqN!D>rB- zVAQNKo`dRl4_(VYr0iS@VnpYH!@n%Tf`*3v9H|iCJiu@?Rm3Ua`Zrp$lcBfAkyhBI zB@;$J)I~}aRn@r@n2tMb5#zYMMsCxg-pm5ZExxO#^-Pf>wRmjzM9e_OHMXxuuo5YA zU_rQbRs!1;*b0g0s@g366R*%KS{V4Bm3)&`QF3x)1Pqu#FZzhEuSl>_+Dnb5 z?0S$lLoGB%_}Ns67-_!LQ$)NWFupcc|5gJVDvM8)waHxII~^Q*wtg<6;)}+Z8FWZ} z-*8C}?!6%z4ci~-Ydo^w7y+zXfQAK4Qn0B~N z`FqgI#oU>dRn}*FB_}3f$1ZjDH-ZzP z78vBczN;}z=}e||%2!~`5IRgwbY!*IXatbid&HqmaFW~wx~5%Q4bj*rSKEtQcxC?$UF^ZIAjg6Scw`MR+?DU+R{##mMWLjXAiQ1F^)2W@cu=VYPNX(Br^W zuK4^LQGaz0DAtj+r<4Yg{~awaC

ZuRHRSkaX8NAF43vw3e0WuG3b|`re?DpkV)X`G4@SM4o(NONWnkFdph4Dbk zTKFIu$)@HCI-9t^yRqv`dBUaLND-jaVK8dMu`!Dlx?U4_nRsi-{~Kjr85LI(1sU9B zaCe8`?oJ>`AOwQDdvJFc+yV^lfdrS}&fqS=-3cy1g6+$T~{B%(?>HhJm|*>9wjvfn|6OCjsm7m7bk#xz%RU zpe4X__I?J-wnN?|;{J1Nl4eiy3%A?zdQp(b+b;}cfZkwt|BH*PAACG}Vh`*7{KLHi zgL(RCNK!NcALQuUCmB_uch<91NnwjTF`EzKNZM+xJH?eW!Wf!;C0V(G0Yb=XA=Q0r zYvk!_Rgu486|&7{tWCg9%uleN2Oj^|H@O+-cHyb&nuS+9~y$Qu1VgQo%~X#UcX?{eBQyN+(+#Q0dwkJ zk}qu1{C1l|w6_zYPABMz7s)WkmfJrjbr#9Jdv^_`d~PjU^NiopM)@}JOKmKZ?6@sW z^$=FzJC`dB&2)LhWB~fXa2|2Xk6dQ;b%(4huK3IW(N_5Xz2UHB__?{azl3Z7`i%N@ z2DRL={{$ErKqVmcqdOUFAasV`Zu4Aju=${=S)6-3OowG?OR!y9&a`(=s3+q>L3~N5_dhU4Vug*zdPOu8-RMQxDC@mvv#@-wU^|Dvz}lja8e!+gxiE2`>O< zrendRWI<0hT&#C?h^frBY+Fv2h(fN@DqaaXH!sHJJ!6i3wvKD|O||FiciP(SeN>08smxTc!*^QiFh+nwl3n#9K+vHYY*DE&Ro ztN!T>W*m^Mut#QV`(~x=FGCs4su(bG(0PX?ngVzFrIGr_bqa!1IB}w;Xr-N(hPOXsH$|0% zK&k$sH#Gh&HFP=_g-Mc)m;NRxeKa6|z|~LQgB|cx0wTh&2%;;){^@3%zc3Fa!$ddd zicIF{c2gDjZX19`zozXemS1Nt@-_HuZMo-i<|7nWrjs~sl3K6!_Xz2#PEmBwjYb*9 z3l^E0gfy4g@V^@DZVzt_vt7C+T3M+}nu+Muu%rVA{qe*XLzY;pd9)t!IszSH&eqxr z^zJA=G_0WROm~@X-M;Z-F~W|v>{AXcHAd>RjJWFsZM>05)lFFB^8 zOC^}~^Z$3_l~q$Kkmq15bXPy?4wx>U{DOQN{p;}-1gpV@4;>n+SC|erZ|ED-kYzXZ&0oRV=?aD>q1W1q zN{qnBu;@_`j%ppv;pGIZ1vh6?+p_Cqz2bvyI8Bx>($2U1F2nR^l_;P2^^yh^gRe;m z&EFByf|EVDr|BwOyfiXnrQ?Oee_gfyF_GITYjx7L@+6a;!snWnmZo22F~f$Q;fDg0 zrUR70?0{2lLE!6)e$^hZxg{IW3r`D}x6i2p&K4o}_t8nG1S<91<`lV^|KhZzK2k2V z`bYuOTuUq`sOUJwI@mi?tQ~X>WJ_uHZEnF0pp?BEIv-JQjPdtt;qcvgyf!#ExJ%5B zZ}F4(Ot0o`0F@jQV4(Yc9xy!knwm<@%ZmprNBSbM%vnB;%F@LFnGO;H0s^3C1@H(6 z>XB@~iM*zYii*&Iv`D@K8z#VP{BXTPB;C#%Tnxh1);710y$;EG)$6k9{#160gV7E+ zMH(dK7|@^T#1!a|)+P)Kt|km0eEDP570k;nW?~@gFrC^hPw7GOcnzoPm4p}JZUtvz z(}eb2J8hdCC3|qo%r0a>U%1bcuhGH1Cr#wj!qoW8^uETYVec=nr^0R#5q&yUVc3zK zulEAN?~kUlC;3VBRsN!TK3aHN()_H77<@3U0wYpazvNu|0)_sGZPg6dn0bw^?zx zodT(2Vv`6!fyb4wZ(2rD$I3XCEs$TF|7QJ|u;5dh$KkJXLi3VRlT-YGh89~q1+wr7 zOTcw;!I}f2*wluOcLBLRHl==Xy7|3;WU$;6x-{{g^Q&Hn%mD3_>#`^nJ+{NLr+jHV zdClA&Rl{1!VoK*e(yRMAID8B%?!J%t9YCZ#S(R2k`J*PPbPpS+sNt2H7@iWtu;a68 zrF{121h8$Fhs6R-VQ>`pd_w6 zf>3~2jb{bM26i6atKFQOk?&eJ;BsAXjh)s}x$APtijtu|A%AcB4TCH-Vq5NE zd%sB`-=7Xgt3JXktr#}q*a5MHIQpy6~pI_68_K6^ba_jv7wOy zJ-0U>^XVvsY7IFJj!_sZzaP%-sjXg#$Gq*qGIsEY4S>I<26!%nI-5ua?>(!~hfrNY z=Od1X#yNU*E7iR+Q;;ym$O)e*yKiU6$4D4ZPCL|U!yiD<-Qu11@^lfmq`~x7 zdB&m3_7%-i0jfvNni4~XH;fnZY~}&xPb-{uC?F~uCH{# z6J|ub_1=DL85_L38sH6`s4=7xxYSPf+&cNg!@2BE^6_t+!uh)Ybcdey&Cwcp#EN6l z<{uSna^Hl-Alzo9oRvT`sGqB}+r5eFwkwxB#_y0^17V)m3)0ETk4uf1jCqcH8X;JKK-(jlKH5NL6rGgujDV!9Ipwk{Ez2{G z)|g!j&K9}fp)~b!w&Ky+@Ivj6FCsE~-ye3V8{4r<`@Dv~c#6_<^;0ic--ygdS@`o( zO;ZOVS}n+{$MAcISWK}qUz&a^dYaPnRF{zidTuQ;0s;tfGn#kEIh!jKJ;#lRhM_v` zido%gEat`iwc}63`orDlN}eoMW`S+)vOh-?0*taz%N{52J*6jjvI+{c>0ZJQ5ye=P z*7YGDAW6#S%V8Yh8AHtGv8V4rb>alnk7k(36732uIw<1pfeEoUc;@&uIekC;q@?x7 zU#nq%zbiqHI$x)a(a)pi5UIaH)YAZ?e>+M_(+)W0HZS=#v4!dcm6nIdKWPUGm{RwV z)^2XaeI5dC7cVbb0&W!xsN#COpikHDhJF_E7s{KLE{i3a&}hsIrf$;OL>pz=0ADYH zTli1spXnq{;byMIcmAS^i5|3DFR@Nab!RPj9Uq}imYwH3KR9toAq&KNSVam^Ochjf zfCF$p5@tEM{c9!826`SpG0^D?1;^x{c_DgSGny^d(7SoDO#33lpXh|==7^YW2E8DU z#z^OVkH2aE0yZ%;C{NhxL)sZCAa^Ts>h6ZQX#X95X3)xc6nydUhRvv*sM^oK1^UeU z7lS04ue2AEl2I?wF_7OQLuJuQfN2Re(q(QyL=UU$*YbHD2myXOEg3C4bSU@>6`bbL zLYi5BD&f7eE}`C2vdg+8tjX%or@4%h!#OLWGiYt7Lv=iR1feIbXGVEw4u2-{u6^H% z6n;}nTU|s2SugvFX^#iLZAuTG5MDkJ4`cbXXlJpc@)vf;N-e0$e%0&4N0TCciGM%6 zmwTJ~$L=1it804`;`~7`XppG(xDq~l8n@b-2D@{YekcFPt>Q9zK(!J@@b_zS6$CID zW>(E)rKO4!JIl;Lbu6i~v4s*=)SMAKzSewA>sM$GjU7DxTS47w#XA-u&7@pVGb?TeI z-NXA06sQy@{e}G~-(UXT&SP!qVATt1xm(2a{1(SG8e(b7A9A)XMKJqeBHBC3x2Fd= z>PPlpqH7xXo;z4jUO(CBs^W0%5rVRB6I=SXS;%#vVY_62Jo(AxqLMj)RaS5=h1GYCUfyGct?^?%~ 
zC6L-|H>nF8Fpj*Nam_HmlSpzQ57@WR0!bWNz`g|p5Jo_NeG3?XR>6R$02EmIK><$z zQ()=04R{LF0^rR5IlZjBd}kz;mG`*>lik7_9?c2*XV>WUFQKsKS!_JH$Un}BKC{|K z;1n@x+`xbOD5JpV)C7(h=)Xq6R`+r-1pfq7g}Stc9X@3}@OgvW~+x;*BW>m{X$Hm137M4>#bpS{yJUskY+O@_f@Fft169QAK zlqMeOx>TYj*EV3LF#X2?@QLXG-pV!*GpUdLV1;llI9^-+_G@wx+(muhsn+$UBvB0tJM` z`9x?3cKDjFBixN194aYuRwIA1zt88LB*Mfz`;c9ah{Zt>l5{;Cjg82L@HxLM>TNCu zrwJ^nUX`a>6y>8_j95Yv&rOZvv;5*j9Yro`GNSUJwGT9Xa;Z;Gme6fYb{k zlm%_mgB>-&pWgK5#ihpA`+GzZvY6#qWZf`g^s$;;J$^biho>DueX2lGhyk9P^{J&k zO9GHbyKPD)@*2^qb|l0N@YI+up;oMTVS6>geL+j>9@Zh;FQsw$ozIGyXBhJkb6RUP z$qrV_jrw)GVEQ;8cOR4vX?r5^&1om)^gt^a25dSyxz{zvbo@fTb^1Nxn#3+1>5>tY zDViKNk}tNqi19Z!OHYrXT4{3xH)mt=ll?)&f!n0iK`cg)S~wCE4{S!V^QRx&0!*AS1rhF%0nEW|Vs58Rpxdg|EM7kg24V&t|f#hmmbs_(R-;f46 z-UJy$`zJ>*`L-Z2_JPa46FQ4u;;kR3^(a+gRV6 zv$b$WL0b{>qTPc{du!ohx`V`W(lQ2#*2uUsAqkhD3jokc&B}c z=n{!&JTGy}soE%&YhFMK>>qUL`Q2+8hzzpRlbbc>3!s<~cv5^wbIj-3jxt z3H`wduuC^S1qm$MNwiOs{e7fKJZb_dtaD*O`W`d#uI;s;e=~2?qz4k z@z=p!)O%kM&lCuI+@tv}JUqie)xra-Ix!tCM>fP19Tsbv5f!*rv8FtYKHOR%o>R5g z`FmjTC~uL^cjen-IS?vlMQM9_>6NfJ1*LtleT*ybhc#QQpHtCJ4h@FJx4 zi?Aq(o72?`h8+|dC;frrEE7)kywE-@7&uaq0JQGeb;O4yEN2N59*@#gswq2G*m%c& zSQB!x)i%QQ0P1Gn2O>aW6qX}Cfek6dq|7^AtY0CtK^QN=J%)S$2^0`rkF`KPIm9FE z==;}B+2VVg)P=5X=?RiIqvQeQ)M-K)3|9YdZ@xCSgQ!#hAWvEVx? z6IxtuPG$Vh-KwQ%L)?L^)FNv5zI_}#l@m(*Fzv5tMMgfsiE!RK&<9%svqiR{S1K!W{&K; zZG)=9TpUQ{y>X4yzMV;56dG^#eIh37w!)SjG4ZD!lMITd?Q0@;=;r+Ux?(b`6jI?` zGC@ybDZ8ecz8}e#5pjeMHKsp`s!9r9USFI?F1CcC!0)?%$slE@!OmwLnaYWGDuLs{Yx9n!*D@Me9I$gNXBQYtUT#WSc z&|gy!mY4y9R*nH9c_n3R$K7OxBmrb`=Ql{itQM+9{j+YXqwNXLfFoMz1^J9w!q!y5 zbw%mVG;D<(Xj}SD^S&63>6L8lVE`;FEG7+Kmb%x$BTE^$D|M`6gwy#>Zc!M6tX+{C zS*%OO$DXH1{BS6sd6DOS-UWF@HvHz!^=jt5z5~&2 zliNSgf?|%0Q}^-otyZ#$)sEzPqioC6bf$`}vMQi{>?dXIu+09Hjq#g5SmK9LjyHR( zEHu+`%ofT!+>r;6xQ%JW6C8}HiVUH3uGqUJ!Ao_v0G=IG7tNjg0FBQ4&XIWs zbmCke*is#D+g8NwK9xljBR}RB?(a)!K^rNQRkIl~&#@Z4~zrZ%j(#q5w7ay~gne4pUg~*Y7;hkePs~`S}5$ z{yeaCN)}4zsqZ7~DOB`9H%q{2-55xQRh}_rf$wimSN=tfj_d8i3m=#&WlT({0X@Xl z=4SUo1BHsLG8His0zCZU)5vJr+ameO{%-&z4^&WU{EJrT?nk#RTv$|8>wD*TLc_tS z@i_EPPyVt90RCA9s+B!JF1@Gx_v3{k=;I5Cu(-=(P;@iG2T&7M8CdTP{{Q#Kd(yJw zZW<1b1OUJ*YD_!*s%S&|Z~R{$@?NC?sPFReyh}za4c%AHh85Tjn#l+^SNpyE^Vn1I zoAkpMaprA6TVL?wNAxmdtR%d0T~zW#VT*g2i`K{4`%~!r0WApk5-^Q1U92&&XBV*@ zCp}sf1IlOu(0$#~-W*Bnjk*y~)GLeo)E5{}+3wv_OaLit4_Ld8<3=s90I)<;AO!b% zNDho^$G4#|rKhL2wz0XSM=f!WN=O(4{Oun!j;Cdzyb0uczC0Eqc$tG|)n3|vQ1SWV zBKFHGW%pL(EpivCH!fVsex_Edj|<$TX(y64-FbbKl8^O@-0r1m_mVn6Ks8D$YH!c} zUM261-z23Y)Kq=Pm&(NA)AJc*#?c~hw$aspO<2MdXb?hZFV+-GHl3y2uiw&)_tS~> z-a@Xd;;7vzdu3^9TGS&j9Ez@otd)YitlMkDvSO13yUuuw z=zCLbfq`03Ac~hcrwHsG*TghvyY0>cN<=IP@bmDQT^39fKcU-kxxg z*QNsgz>}9u#_zk81bjVOC;L#C3(k;7;S7r@p!1{pUqUl~3zYrsto$fJ#?As<< zET14#-RVEwG+*Gnq%PDKh@P(g^xdd>8LX04A<_37pV@0-No{}7!S^h@ry7s(I{%|FrrV0?R8-;O z7x}jJ&mmB1#kT-h;1F3gApPW0z2HojpEEO#Ov3ZIJMH&cr8t~DRwIi=?v4o6UV5e! z(CgbzZ#!$s_FpBt+$8@JpGA9r!EP_)jb~e5YG!4Np`|-l^7120wpwv9PzZ$&f6wcV z8)ZOCs?5(IA|%Y<{%+C$n0Q1mg`Sp0;!1ZGi~{?+N^LLrad3F(cK8u)y{f$@4tYOJG}W zh_y<|%aZ&?r4aP}O>_{>T5Zzefb2@B(-+@n>0qa1TRXl)vM|{~O-IS3vL24%r~Fj) zjbwZ^#lyM`{0e(b%;5FXiTG=*i*wXZqzIOwvAq_-SL4cgPz_Fu81(xzT0!L#g01a( zVO?SJLs!Ow=Xp{;Sdcpgq-I0*g3VAgrysd`)e)OGB%HnViS49GcI&NCmGM{W#`&1X zi#o4G$5=}%^6`qLqn3L03xP({K%f?;$r;haBwV>>Bv-9B#}oHw#HQ``3%xtT`|AXw z8roD`m*2toi^(HM4>|DvGp>TnZ2e!M*0$=<r3C>_1Y#xqDB(?{?((dJt8{ zqY*`nix07ZY53E~{c&GkObm{>t!T^-Ka4?X$0daEMW#EE85iVw)HPpbS5KT`I?n;k z<*hObtD0k*hZ8VY%bYmbMvNG#GEUbUf5G}3E zvG^f0`krSlX3>`4v|g}@Ra80Zv`

GKbsvXCc+g$x@1c;q6M-3u%G4&qWk1Rx&XN zi9JNyja?)mn|5k77nNC^p8w~h|#U{B&Cwq4HwKa0aiNd@IT66YJEB4!mA>2vn zv7Ws}pSa^jk6#<;^GOTTJUTgV7fgimqVHvX%PAXf9}m$`h|LD!uRh@OV5ba6dEHWZ zYeAdaNF|<+16@+1(D&8O6Aae^L(fa}%^%B6M-kZ1=yJV&mZJomFT>IB2ixa3`zOl{ zxJIKJHy(%lG`U-VB5RBkY#8X#Gh)`Hj(vuF{bmC9SoINUMeodrhoIg9 zCw>U)MWA)e0Mkt9EW$A<>7>?Pg`6@X6|KCyTY%czEUt zP1AF8#qyPM3@j`o!h`ei;m7yypzv4w*8uksn~3P$DXs_LBXJw)=;*k9^2HW|0C0K1 zwEELwtp)wc5gMze3{|$#`xi=)o45H(%B{kdfme_;p>KgG>;L)V#+##|?XxpX7#Ntt zI4m!ME0^38*^T65eL_o zT+=#LNqwcSDkF=_ydW+`C{eP{8x(J=GIcxmm{Fnd=_2|1N|8!MTu=(39ETW+-R&D- z*LQkxjL0i#?~~=9G1~h7;$s{yTo9wE&CCDVD;8Qp(CX~!y7Os47`W)@xzcjVr@p#5 z;I3n2$s84^q8!(|3C3%aW?j6VFeNLOl0!0)4t0XahB~y0lCD_jujF_7BMFkfuj|Kz zko$ga4@EqSCiMR5b^T*8k2j%!`P>Y%_-a$Pa}M@J+XU2~eh7?knEy*r%NyVkJ_0#J5sF1EF1)Jz^|4Z{m3 zUFw{(*e?=!B+ngo>HO2f=(g&j#48riwR;J*omFsFKZdmGhEeUw~Y#97FF7CVyxZh;xq zlaGDHm(N_5Gw^Ii1ySOC@}^Zcbm&BUm}h6G5Mdjl0p*-n_0S08kbL-W%txDl@k!Yv zS}A=+p(dSHw712M5uaZ-xB}9sELDlaP;B>`^?f>DcnYO&3hK=mlY!!4?W_!{Wbs=& zgC>7!9WF%b5mw@TdpYcB8*|p;;IiDeeCIJ*BcBWC`EaQbT;4UeQf$9hc5dS>ob|P2 z*{*JY_M&|NkwhF#WSP*=2I*+jQ?m)771l@gZH<}O8wjI3o*RbEK81sSGzSF{NfguT zm@G}heQq%4a@(78VOz!I*&pwPB_v!ej~z7BcDHZ&68#uk3UbyH#tOOS1zl$n@_+Q~ z{s@BI_jniO8C2ZDhdYx!far_RESDEiUO*5}uiIn3p-R@i8B@t9(EB)LOV(L~_M+Ic z*!YD#RyC6MK|(kMxU41p3Zyb>HbjjPe}O6T^H_2&8uLR7b{-#*>R@)k?qO_k2=Zre z8O;wi5~N1Dr;K7y_mSQB*T-7iC@i8^!% zzt$NRj|;%$ErbOnZ$s_Ew@xmdx%>`z|BWXY>W}=H0}OX}E2U*mu>Tz}A=q>SkD|&* zIuI+56?5t-V;5dCnA}67uyX~FXmDn?0>$taMpKL{#nFTZ>WBn<8NLzeng+L+bbwO~ z^`WMGAhJnIan@H+ttBPXjY{NfbHL!%S7poVg*Q2awCfN5b-o<~TA%-;%R|47R>eAX z=M=ZW8_#bqUcD?sS#Kj|mL1-i$7e*~ZpfE*)}rZ)z_nG$Q8oEV!0+z5+}tP;-JeQn zjnl7wSA}+D%8IhDppN!ECJm2mlq>e;L<`hD-$O&sfb zvy4hH*e-R^HK8i8V6&DISo2SGoKBtC6A}_J?9X!8H32NyG?-gTHf+fd-1*f; zcLKt5M4?ZLLuKGT>fJY0qIM3Kk+jY+Y~~9Oa}v|EX*Z%cnCvkwwTGddL`Jr1y)Ko! 
zndb@9wXxC7NlEcyJAY%S3q%PP4CQH)E}?fncAU-E+?od>x&tPtvPaa+&u=)y#0K53 zyz%>e>}RMCbdM0AdHTq0JdrNX7HlD^q%u9zR%ZbO61sc#)WhV4Z?>wjFTw=P4OkhUJCA0v`Lu_9K>)1?T}xt`l< zQo@7P&HUW*i4^y1EFkd2oA)N|9bO^gG5-a^$&Zg>`R%Mz6w54KIQ5e!WV=EUFnOX= zMY5Kfwj=cOER!6bLT5vpU{T*9=Z2Cj42>c@r8>ht_UGsg*j(xV6xjTk&%RDNV+^)jKWs9wvyxi&# z7|e)~6@-7RQrqiJkE6t4TD<9}1+7X_FrgJDQ#i{T{Mk+p;kCp3>TX_8YmK>IwHnKe zyH8Uk2+KA2lSDQAa8J>%rQ?u*-Sd4DLO1qr!C>l=hfG4@;G$ z*%~1kThI@hezr1x<$B8|K@w!XA4x~eiBell3nI5fcRxQx7HLP0X{_qK@g8{>*aIC< zo8125lEWAe-7+AVx>|{(+gE6CTgS`((FtFoLks%YqmPj**P|+feJlR^GX+7fUjQC2 zox-NZACw{Gbm%9BFyYPe=%`GFW-Z)F!?;Wl?bG2hFex#iDwyi8kf*mAbLT}8IKfhO zd--}CV(~k8CAO(kmn1R^Kb_O$8{KvWXMQc%X+FZ_$_+uEML5Pz4vBMe42IEQK)?q@6h%K;WDPE2?~2*!DhXY&DZ-FH0v!m zJkW+jEhw390DRR~Ui{klR~UY}=@dxx2byi-KG3J(uy=B`oLZ1K)&8xY-FBIhAFRlu`caCEU2j4r0@m*+XX(+DRs0#0i_)6$7p9kJ==MS0- z=Ay}7APOOmPWDB`UVpB)hlTRt71-woe_Wa-;+KMSe2+$%>Go1oEIVQ~4yLX@31Dh^ zeR>CZOAx&L&HbH;XleDS@?x&|Sit_O2`=d2gb7~mo0P*6jq-^EhA-SFW4~SOM;V-K z?p)BNH#CPYIOISpjAAb6iNT2MCT)-xfu4!DjAr+e9*h!eEawMCv6;KQug^InoFEJR z=!BckuuMrZN+4;|K!!kba&7Zwxh@dUgHp_&Rnh%Xz_BaT~63#+QZcIB- z<{p++cpBu?;O)ts^&`friu9a9NVtCTKt*q0kud=o2r*qrZ9)aIEAd}{|eSF7U0*3v&`$SL8TxFis7>xWio$8Oz^>BY>_t}AKMGg|Q&&OYIghK7yAoctYrSmjD26|Oto zBi#MQ6WbzW?x?ExIVCXO;QV(?1fV!vDgy@wC4CdI62KVqbd|LwZ4gLFLnH3#0XxiH zcJeHF5^@b2+yQ@&RWcwy-%>*KP1^meo-v15AAKyBKi6AgRJYC-7y_$|KS{Aqv#T&4 zism(FX>!>G$q6|Ockvg5c|o;+PhvwxR`yq`jeFiGlAmfs#1po6%pCi+5=q*6{{YG` zAUiJQU&|ema=Rsc%WONRYriFs(7n8d*B61Qpru7n-7vocfTiK4OLVyjNU&a0WDICc$^q#%>i78 zfdyW<_wUgFqata*ugFN|LHz^h6``c01fa`@l$9|VcKQu3@F#S2NdUBt5mjahOaMOw z>^-`pXQro17EU%0~j=+=zzPdJ-UeNzB2!}XNvp#`)2IH0My_*I zYC$jb;s}gw)*u13Sx&E3Vk6tvCkHv@Z399=LVn8*=Pg*HN3+iW#=i1+3KY2ZQz7jI zd;Q+bDD(b;-pA`v`$-c@{$>Wv0a#`ye*Frl-2Jr;!8bQI3?An$ptiH?)v>>=Kwfo| zK_YPHuLHKxU%_L7t|tb^js5*u$JOrgg_7m~+PJqHo&IgZS4iG+f}{b6W`ptl5g4S4 zlQuk`L1=aL3h45i`fq8sMOn5z#xr=VI8C9*favgwPtu|StucH93b@ef`2KB&kFXQ0 zo`amC$+<8$QF}li1-!oYIegUgAs#>!_e#4XQp**U3Q(l~@gD8sM2i;Ne%f_LFKLXPj*8f;W+<8_tgyCk;@O0(g+Vmg#8hWvggw zgviQX#5e17RkdQXpOwx1ZZExaFl99c(rQd@dNQ6AL6~^Ljpj0G#4R8w5Voq6AV8rnO^BWvo*#j7huRJ*>uMD#RqsHLpV-7Biu#b}Fms8PsgSYe}cg}5e{X_tql&x>r}k#&u|E@F^SRUXO0^*`APkW^ks=0L#UylVMS11 zZ|{BP{&!1LsgR!skt)WYuu1Nb=27IH&gi7|j^e1KF!&Tt(U zFY%!?A?Xhlt{Mf;-XryA``-yC6>Oq@khsXD1jxk>^Ug+=f{IGqDYJrfGNs}u#c{*i z94#o_I$iDl$x{8&!({Rq9Ng>Zm@+<8{>C(GRUY#!GR218ehsxr?8NquC`v?Mt;oqq z+sFd&c=pzpVK`%UhHevbEPf&XGIFhMFyT%nY+N19>K=JT{0pF%b4g@QFSfWc0`1(otpvpkCj;s)Rrfgy8u2ai&C6MZSe;+SA~cxk6^Zc zrh@%$ey%gtxUql$PvgNiObimRB`D1cI|w+%*S7!8XJrLugJ@{qI;3d$Veo1 zgH~u>D&oIGMOZ)*HSpv7n;t3`xh=-v+WL^ z6Mxh#+)oX5m9xuO6phRf_N&NjWTJlkbam=!B&DN6yizA^7*oiR|K+I?NMWiS0x8WU z%SHjGOhOCt=-JkK#?s^JIx0JM5BAE(rA9_>=%a< zqa)A?I=nHd!G}$t>={Y?*0StI$b8{yB(JQD?#t)m?!ITo6xNjkKBn@#lf?!&d4A&u zWf9Pev%_C)03VSD)TJH2FVbw7^^=xFUd zA9;T$RAoh=Y1aSa`Gm#E4vkv;&`8j|oNQ9YVH5unn{U^l=!DjJ@h5rOvFYYFF*yIW zm$0HEX!=3#$jzI!w_PHB%uc%4D z@eI23b$04ba1`&X&%_^|r!s6WBIN@G54O7)xvEa)Half*X7rsb5ht!4Cm-Gvug#8(L>oyz0Qu*v z9)OonmzYL=K0EA+6+MmpyfvGUP;vj4R}EOL`6dr z@~4xJzW2nVI%0=YZMRJ&BT?jNB?J;EZ1DF!J{=2CY2Kv?_!%IIe(sgOFug zOIAlG!{H;pfPj_L3sAA|f&S|`3cd@g%mC@xddH(t0`Okq1CZ>D;$k|FC*F~S(CFq= zfIV^vM26gNL6;ys#R_EWLm?~SXDqrIdy~L_C(qKT>1V`Kh!sn&JF_UR!AY&{Z`|ke zh6AVC*a*7JslqlgUKxvbuqKPYa`T5r{@d=NJm$4S-Svd&0{1(-3m68iZ+wm??QR1! 
zU&S{cP5*Ua9=kSST{bLG)LL{8_h0*!)Y~e$eW1T)NMYww|8s-s$byvI>IiVL078-; zG0Qe37SN%Tw6uYE9jONw;2<3H?v%X1myFY%`62}@4<+HzkDU^l&`M-tNFs8xDpXxv z{RwB}4hUlLsSBWhcV$mBB=$)Df$LubbnkKh^hyu_y^HWNyRzij8@yDZ5f6A0%o2#( z#VVSg8~Tg~h^ua%{DPBQ-L7^ch3dOj-qEqJcp2ZLm&_$yX|BRCbshaa6VzGrIi~^y5H$h(a~lbKNDtQMbUNR2TLd^ zwZSE5(GsrB-d5Qzd+MWxjN+}!$TUxvio@fP#m!729K0Xj++|u0_ct2oXRZoIvs21? zAquEoexDu8=sK^Nj1=7~AHb0Uu_dMjeXU@#v}vRI3#RRHWDyU3AV-RwKD4M^R3WRT(Us_2?`~lB4pTnG8%5%|?786cPn-O>{!kU-tc7$m zNXF&;jsrQ$F>H5&Hvnb}G@mmrwbTcnE#`@NFcD6hFfG}bKy~$CpUaxYB&DH&gOfZ% zG;fDaXR%*vk+j5$DV4FXJEhrzT<-D~B{oP0dJ&yNQ+AJ#EZ7I=bsU7e%9 z_qcbhog{cirU-p-AtqU0sO|GZl)s^XDF~1K5c6@%_j{1K0U5uMUr%&q$+tH#1bWWb zn1MU0zTV%bi?_z)WDLVE0|>O)jZakb6>DcQDfto%`(Q zycH=M&cN7D#3q!w_rEe}Nu&_KjYf@z2wC#d?WX1z(;(_?yg#{x*A~Gd0^doKJoKj- zg~Yw8v~&1?`C7M~mnq>A=trIR&r|@F;f`Y%7O#H%=7kR}krQU0u#C(}3j`L)6;})T zgA_;IjTyO7ykP7>IH>FfU~OYZ*6?gK=lKz6y)3j~S=40T;6AxN!1lHEu0X`nFV8eI z64P+{hp1<%_|&4`PA(YqRHcAgQ5Z*sK#!bVB|G1Gn#s>pOoYu^5O#0Z;)5xdlD z7E;_?^w2)3Cqs16u^1X1mNq>(uh7nsXc3q;XG zMqqDfxg37fY&B7R7LlcHMSQP|wgs>#HS3DGql{mP-Hhr*82GVudubt@Hx^6xGQj4(KQzskTtgPa`=DvVt zW}M#S!~1p5{Jto`C0*Q>M||}m@IEkBW!|aLDgr?r1n*56TqH2RV*>S~ZIuF4`N2bXxrWhV_;&`am8EKJC&9r|As54o$SS z<)@)k(~*VLOwYnGKCLEsn~Iq(QIZ*#GL5lWwOeVt87mrP%u%-Pr-5U64-=fOd!nAH`q?`j}Fq34oaGHC;ZD-9m9lW*dX=QbYzQk6^ zLs&)G-Z_tf$Z>K{Raf|?=jq%1|AVo&4vHge_XUHyyTjnYHCS+WLU4Bt8rf=jJJTggBsfj+qz4-=DgzySpzgd$?A{w%m1Uy zrXGZk3*fEwE$kecCf>xmEPYs)?q`LVoC#|CI0xSLCQ&6`lz!sWnN1s$ifz?#dCV=ODl zL+lf>;0zP%QQZ&u&D=|3F#V;RKGI~;5TZQZhn?o9RUfHXc|A;h7Z8Sb=^q>mQ`h^M zO;r;j7Wc~W6t$(tL}SYX&HzrK@)|9qJrX1Fw9EAHP`!NMfa!?Ug)I;HWkqgZZbU?0 z9{+$$9MxWy-H|De-(W2Wm436jWaO^5ch0+EeT%Crh1Z!&tLM`Mp7(g#>Z0Em#oLTv z*TveKWxf6KMAKyxx}y#{LC$$m*Ju|LP->t8wU&*F+mF)IyD%|{XM)CN9m9xIV@sy* zhW4wkk@`@@LRf%4SYzi-1AbdCfIrRwkNRbg z*v5>fm{_UaCB}-e;_fWdt%qzjGJ@`!p?B~I)+2mLKz6p+GYU5uEO#o5%HR#3H zA=$uFbwLrBwjl=wan4%}%%n`N5CAd3e7G4cz4jBHbm=DpzbO?RzOO{btPaiJ(10hq zGip8@&wJs&c%eur;yCp8VgKxK?J)#Y;(~Df9vHM96(Y7jux^jt29c|n=mft7u+@#$ zI`1(LhevI>df=KPI>vp!68%;yur8Pg+y)(Gh$qD>t=l{av0+n|PX;&mQMfEYd3kuI zINu!JqLFCIEB)8>-Q;r=9{IkS?DU4r@3f(mVb(eB6@R`yepx>r>(#gj?x|k|d^KIH82WiaH&xi7AR4{`hvF`E#hsIyr43kew zcqTTw5FBX`1-$HF>NR}zQ8kJV_CO3e+Rf1{uJx?p&$hET@x8zCrFe+DhCF(fw|GU}nmDqA@Y>n8WbGkoR^nKjhc`JfmGhwOfo^EFmbKTu)@QjSct5 zo;2^RX9Q$2wulqJnek7j+riVOEDZa}n8e60 z8RH}6=G`aPZihAc@L?w(G=1^v&;bg?{wPku5Udz+Z_gdb;7^`tcReyvnz@^jORhOA zh`-#n`Jr2x`fQ;XhQYSz7rs7h{VNDVfn3i;cc`%MT)8m(Z3HoUiiSb!qDefl)Pvy;HE5k(uNZgRYFJ{U8RpbQ`3e>u!b8U8Yp& zh#rqU1XuoTDZ{!`MZ4EA)!}f&?!{i@Mja(e{&`r*A>M<<2zb^@KO`?o5>3Y0P%lkf z_P`VJa=Wo)yxvU*(4T6O;BpSK((kMQjcjC2Ph-t*WK4O5PeFYL(Sw4artVkP60OyI zJeX@?46X$B{*h+r4)2pI2{JG{?JY8qtU&-wq5o(Ay~4ryGdjjUM_RhIBtXA7tKti^ z7Mnj*%jzEJ=#tMM2oN=-rNdc#NPyo~Rst@DY>;uJf4`fcZ(9= zgaV6<|G~n-;&`bE?dk5U>=xnb?#|*Jdf=fXtZQl-0@98gcfkbj{fYtu{HwF$jFCyl9z zSm|M6Vgd~~96-A-Bf(&f#a5+>kN5Wu&vU7MFlqNxqiR9%Dx-Y@(JHT_!_8W#5Q zcQP5u`v*=^i`{TbPVS4tS@he(-$!&E4d&z7GACG)mxf94h5vO~wHqukvOYa~O;(1F z2u~E}BOiP}#fLo?zO~HJK z%5=dSKJFE_z~0y`W^iBD6*}H7gMWGCN5N=IR;rK}5)dRMENzOJYOB=U=Kkq_a_*?myWx4VAdg>@loJgqa7&~)wX zXAcrTDzE@;GvOkQiR*otQs%IbSJs9SI1e$n#G3AGMd%o-4X>FC=>}`zvs&_)k7q;{ zh=s1_mqA>+l;n4CrLJ&~h(Uz?O3i=P;+BoPOz_zd;3e5l7_~rQD3dkrk|#0m{Ucbf>)HzUmwrdDtwGG*4ahg@dYYj*F?Zls&|ONi z(+=WPz0<}122|srk>|(y6+Lc);i$3;O;$04JNA(}?Pb%(j?BRltce!4vn=Px1{~l- z);qgMw@~7_h^EC`b)$ll7m9%y4lKoNPabJFk$#F_ zO_DtbsB%2jGRd}eN6?O#uKQy~>}pI71mbI*t=Z#52RAs?D}L4@%<^NtZwU(xX12NJ z*rXB;i*nKzpjEdm9P{#OQLBCSV3^9Gdl}Wge(TEeIov3iibLnGi45FB@uM!Vujvi& z^h7f}7}Xl0AGQ(@4pmJTw)ekmA|4N41Y_+^H4mE!TYlWUq}rbkC7=94NI9r9*dKw# zYBlAaEl`KT&5veuf9At#wJ9#2&I99lJ4*f&Tp~lRudXPcnxSrZ0TyEQcy^%E$~FBn 
zFp2Tkq7;qwIW70OF{!|^HPUu^C5%b-vd_2g2x-v~BUD&c$?P?srIh zPDOVb8GwAb5Nk6dV17^57lQw>(*s^cvIJ8+Rr@buBh-P!P!J;-5{81wNKR_8ft{|# ztaG0Mtv){C1>gDjMdkh;A~%#5wC+h!$X&sj>&DJGMs6>L8LD0ChM_-ruV~i*V4FgtSEfWMs zs(+cxPxfebd`9^iZ4+2(-xpiYX`y8Qy+}sRpjF*Y7k; z-Puyeso=Qlaa<#nKCrilQjmDYmYYs<7_*M1b}u)%Q~Q;qv}1Eg@IYtcx=6mMEif@5 z_3C^|fw($Ho<}dlLN8&3pmBHUAy}`6@x=4+?Z&4Z@nBtzo_4#*UUp%N*{9FPn63Vs zk>e01FOH*gJ8D?z+|ZWu7|H#bG}C(KOmSIc{Ym>l7EUcw3wqBF-L)V`lNl`t(wU5y zw*H9Kr4Wg>FAHXnTiYTP%R97NlGHV(gFMS-*%xKv;4fZKi{`K0MTWyXVYO=PSiuQN z4G6%pvGI!r;?kD&t(_nDskBG7k{{Cm3g;l~!T^giolIVUnWL<8f6AQBL)Un=vfJ1Z z%T$v?I7%~+r{Gr}5R6t}i?`n3CSvN}OWcPLptnOa=B^~nq zVv<7fKfi9n@^7({kLozr`~2u#ud0z_qu#>KG{4vsJv#=8uMdR$wVH$;?+yasxRn07 zMN)h;NC7RXLcz9SDx+T#U})%v{yLE%ZcLOoYkj#siO3gQGHhc&@AEzVLpDS{oW&TL zExU4F&Gk%a(QP>^#8%^{RIVAuT7ox;!DV<<#BEcgu_E|%(>H5Ub|Gf~A>c9UqIgIL z;0b{Q9sTFod44*I-prK78zs;GeiOqpU7PPoYXtK=z=Hb)|VX!%K2^mqv z{=54)Z-$e?)x2K-pC{+y1#2Fg))xey6(kegTGx*BXul_bqM=1NqD zs5vm2E({Gp(?p9<6kEj0zXc0K6a9!4D`ju)IWF5AcLZ;d@v>lR{rQ!EYl@U+dCGRb zbog<4*5Tq#%Yp)0z|?{t8d#h~jAoUuPnM9l%QzP5YASix6fQomme`8M>HC}?i90t+ zYIsBY=2%A!Ac31tvy_y!0CNaYkh30lDnUeNN2~GLZ+5z9EDH&Jso*5SWew(#p$s@W zc(=fz2S}dsXtAzNpNl8^OHJBQDXWDXuOmzpzuqc9ZG7`S*qDDc4cu;=^lwChMc(r8v{!h5@)O-I#e_lUD?T znEh@;?~TdKWmd`>{mR4n3DnfQ>CS0LHZO0Ny=eiEAxR~%4zJ7QD2BQI=00;A?R_qO zC8obX+n1bZfE051nH(w}5K!d+N&>P7_kX1d=y-T#MIThONt{Ax32uFST5eBgHvxK$ z^eh=>6Mz|S03hph`R{pD629PClOa)6)g%qAqra8m2tZD3KzqADsbY@o2?w|PxgjzM zU&DggG@(3TTal5Wdu3sB1!yljqjxbUlzkZiX#jvXoB%u;im(0OfG!#dcuvpP#!(P-aj%_xqRsl#-W6)m@u= zd49Wc$&KqtAQ~+@fGn_(?K&;^typR{nO-$hPcnuB)3v3grJ(NesaQZ{_7H&P#W3-$AZ7!SM_r_CGr~h@D2_ zdmZmiRXb8GZY{qMXR{r}X@Kp-SH|-jjv2YB9DalX36R}Uk4B0H#_b0fzOVl{eOHuZ zOo?Gp2!nH1%zfAm27C zH8*?wqoX3FGyVOcYzDUa73l@jDE{L}7Pcd@xBl)Pu}QxQKK@mrTs{AG8+`rLPD$5b zyyc=u6?OH{x+%=Bh3LDN%*Uz0V+=0$ErSklxdgv)cSX?R-6)X@QSr`?>&VPCC4pLs z_Fsp2Qgg;9Ui9TD@DkpczVTF7TissHe9egSJ&b|U(?vg5wThN{aG6qG;r+CexcN@a z6*UgnH9DN7t5Upf%TT@likq5)sJJ@`?}Cot^YuYnHd$9Ce#iIQTIakyh-){epbv_f zd;N+dHDwGIioJd2up!{{6f5<9GT#^l@lo7dvAIVcjaTF^nad7ex$6p>hu$)O{zEI! z=<31m4{tu}4geI~{p-F#tl#@tqg5Mv==aQc?jYfL>$3EOPV%XHb8G4WesZmY4q=dF;qi@4GJw(FGI>rOE+GMGz7U8)Ib|;v)W6H!xtgWF1{68^h|!1Ne*QhRCWrc- zvi2E@Z3s!1x^GL4bYAsND~682Gca&Z!eI_SaE>K+L&lR)a!I0Y&CEz^>mvClg;0qL z>CYx4j6HL^K~{-3SP`oEP0ym|5v1)*5>yWCC{Vjwh7<$%3HCYCnVcjS<)= zFLIkE$kZGf6W-Z~durIbXQ;Wey*}4%hiz;PN}qqW&xH*UB(c961Yik&P85|VhR-LE ze?R_ zF%>7+TZm@aP{1H3+zU8XT3W6?H;9qXiecTin!pU5mZU@MaKXYQWw4$Ik>} zp>f>~gT7CGZxIek)c0$2J%k`gu0wy(}{y!l6M&lRbIa5^$qk z#AbZ&5KSZm(U*Q8y|Ba6IG(B1g=oj;Ov6N6=TeN0}C; zU+qKnX|OY>(~lH%>nNXIi^$KBjiLIj*9^f8W*oW>EqfGCKcrNzm+C=HU2DMEWy&>~NgepMQ7r ztTxbfEj_GS^A4NZZ0!R>n|6A=7N#Qo=jcKP{sEgdHS+SFZTvos? z@V2;Pcft8krxww%z%9gGUVyCl1@LcDN@03#yb*Ly6ygb-@E5kbyfuPwi~%cp`t=RX zPZcx{(|yqqX#nbX^)M<#Yiz)s@|}Jg7NP!#dsltZdU<>A;urj(_Og%c-=M4JQSYdF zoM#oCWSFP)Cie?ge73w+f(OXOFdzoL@8V05+^~y9Jedc{Rx3lj!J-yR7(Mp?Mc2!V zwxWD8-Y!?cpLc;nE_j_g% zlO$FJ%oeRtD|T>11^@jZHpYTo#leot6OEwnmj6fnX;I4+#@R@)9{4)63n$es)_hmC zz+SkWaqEVJDZMKL{j00l=ueUs6;`HVXmsx>B)(4i^SUPgvIp#FbHTLdw+^ZBI?&)h z=;tCi7TeBeR9Z%A^s(&3DV?%(qE@~dn^ zi!YR&70e!+F!sR*{t^6nmBjLADtQzVYdF1%l6LhP~B%b~Q8?rY^d3?>s zPx@NcM?RYmM=Z_z1-&RPjsW02%}t38$1Ne>k$2u~zCbZ@t__ZH6zW(X`)5;**0|%% z4eHT~qbM+|ej)kYg2!fd$SI%0hv0ddcgkw_kt@_D;httO?4G|PoK{gYGC@(d7pJ$) zoq-N*KUScT`ecixpr8~s=xZ~qXW${{g_7?=BT_%cRr;G+JCnFd($G%@AS8U3wr8~V zbHr$Le*%J|ncZ>(e?V$F?&wIsyU_7shB4ytNI>x;Z@9VtV;=ti)0? 
zE&NOgeSFHsrJMKt0mC+pZ(Y~$o_pqgKu7Vwh1g>EPd87DxYMejt&??Uw9z0-dNHVWDI)Bxq<%l6a2Jx@~vZ0kpB8ms=QcBBx2(h{8S0=~JI0 zza5-pxvlVsmRIzgW<=YaSh};>@UJ4ln9;DN!DZaEpen~}QU|LfZr9>=Ns(Zp=th12 zxLR!Pu#$piT-Nj}4sXR$<0E#MW+{Pwe#!pDRb6fNUX>+@=0EZi-#B}7=d*ZP7l_A= zJZenE!S}x5^qG{OPcU)8$%9XaVd&@yJyi`yHOa@2WB$5S(3L|mL{2gn ztWBF9h;D3mqOC?BWiRqie(45svO(}_1w7AKQV*rMmPj#w>RDIR{%=C~y^ey$P-6b7 zG5A66tbg5bAi=FpXqjq#k$epU__5rPdCusR#^9u>ul>Y&0c;%N_;Er+Ca~B31yL}! z=Oh!Cs?YiIxguSiL&3E5taj&LZ$+kGT8GBZ3=CcGPLBmGCNg6Kim2yLe5w49Fp^li zEepw^;Xe9*pI;>J4o}CZ9_X z>=8k?0kC7#0)4?pBd%k`Vh+&k;BLBZJ{@)m^?yVEb0UCSy>@zc5^ z-0=W7BKX=+h5euTcTbGFhcrmd9KqXxL^0j7B{j;n4K@`O55dy0%%B$Mkb+Z;Vc(S@ zd0L~z;}+K2)ewVX^^Km!h54*kLeU`FO?npNb;QuOF01T${6C1Kq9W?h&LJUejZNrN zqZJp1)sO)kW#0=(_eS;o9dmF%9l zhq(n2t`BJ9mpOE6`x@z!=^fy0+r@~l3{ zLU%bIHd>tXt(r-d^qt_|_SLVX$OVf(U;!l|}@*k{+AajTej);s-Y#{k)KD=@gBPWi#l zW?YA(8fVUsSXR#-5k?^XQBrCzQOP<$^B<`5dxXfyz1K|Ckm7#B!iO?FAd9WRl= zoOhkef4bfaAV|%~a`?C37rLsL*jQA4W*wW$gm47YbxS=xJ>5=k0)Ve?5k^`ld7^W& zP;VhEFK>#)mIyJTlVvU;PhRwSJe>^+;A0yZh?LE?-yF_F{r(S@V`({CsdWthb1g_I zGX2s_Q%lPTNQm%`co)<(p=Y|6{{IgW03pxq`CUp;H7>sMCRAvNv8ED4H!s3fX!^RV5Wi+CaO6HxJ-Z3*%qb zwmJcbDJgs0H2I22r!l`goQyV2)QX~5=>mYu7UtHpfx%a0ettJ%y66LG{O&M-gI)d? zEai3#V1JjfE5JYlFwz2)e+NtBYp8f9FB0H69-8OriYhYhMxtQ_QXjJ}HgH;w@lXGL z<5L|We4Y&LtY9`a5-pS6BMP8@$}65<#dtvD{EiFDIYFNIvfqqF_9Z1k!4YQi z>*Nru4?*xY`cO>HW4=i#8SRaC z@Ep}cMJ&k7Z+74iI!}wh4mQ+K{Aj=@%?p9es=WN1JMhrW5E0#=9y&0Z<3Mfs*(<2S zBMR;IcqN2`URQs~R7h`ra25q^@#0-NbxGwFj^EtNu+8_SFQ~@lNPV{P+giX>Yi<3J z#4|ek;qLj$DbyRu?Bu)?WRm#Ug4(K{Rv3MA;OkO$23gi=t9?1g`eGPA@CSC{QCP zTC|%tlCpr2CNb%rv$E4)BAtH~wd&TK$Kzbje04>4JtGmL7jp!gfXUs2A$tfG)kfi% z?efo2Ds7fc>|x#ns&JR~K1&Zf@!LPaGd1QMk&qDqI+Je0$TZN+a3dvN3w(h+wir~| zbsPB!MaI1ka5;8y_E3Iy|E9!A|o;jQ&rE zdg7*CW}jbE^cdEg$8nf_3&NvOgJ!M$ELrpsp~OtUnQWw^C~3xj>(p#>+hS1x6P4|L zIFtH%0#43$BdS8kvktEsn>4c2C{#(4E4 zEP=^9P7ZgMvV;eMokC*@d0jmhgJS-0#C5-j!l=(XX7FisY47R&b2IVr#<4!l_5b@f z`|)sQH|7f^E7h5nd|w3GK4&hE;f#uJECR7&!6;X z)Hp7Y+Wx-fhO{?x+v4x7z5B|i?jUCsI{G#Qg|QILW(OF=^wy#wLJN+nkh0p3oWHY$ zfr8;cjksMGx_vw_F_TZ=cEItc3qTL}A0x~E7%sh9XAahy@yZN)XhqF8PfrZ?CZE9p zVo35&yBhtb&TbxBPsz0`Cn)BKN7HNWJ}4em2<{@eQr^M}yISBR)&6t+74lYLf=mXY z%w0MK;K9_!B&lJc!0t8I9UT|~F^kPdV8ILeW-EM_Y+G)KwvF>*`MCom2%IB93ES)Ntq$ESo0%-AFf zya!_PGYmX$VwJQOsm%ejU+d!1@P(=}os{e5)|l+oc>_DqnC%|EXhRt;GJ;+2{}395 z{EhNSPCORlaBuTsNV_^!wG$+g$eiwgVkEqER@^}7#M@|A#R zi!lsK{$XrOimu%mnPaTw+2i0T#t82bbC%;3c%(+gbHywn=iY9|cf?&60cX+IkBJvY z#S-CsZm2|fiXjIVoZH9tRGXP{%HI)p=3sE6#)D#~v5V(EQyI~XkN8&(Lo=dhA&)}M zIXY!PGbg0!eN<&N?sgxRmJ^ZHMpfr;!3HXYU#+1SK9X}RO-s)B?CjRZd|Ym*Vt+3f z5mkfOY`2kvbP0V$?|NdD^f z<92v(gb9x0QM@^&(lcWH-pme-2h?A!L3o3r$de0Q&jDI$HBC;7k4r|hSLrzNcLqa_ zVr*3LzG>jbL}#x1J?iRc z&o_rtBOGwmtTQB|TtIvctNHk}VO}bRE6;(jZ7eynpsBUfxM+GtT(|&g-uY5AceT-N zv{m?o*>(mVlF>#>I`dh-J;Jeby#zCn9qZqldMfiZXEu`nrZ(`b9T!l;$Am0(Vt-v0 z49O66Hmd&|TGdrEide0VjOat{feYRZ6_?;eX~CzXKyfe z>Xf{i79-|24OxVyu zY>^hmoX8*^5;Ah}Edr&-!uIy|JWB-lQxhuJseoR~)VXr`?}y58dOsABy#E6j=6_o~ z{SSg8-cL$O`rp;qhfB@e>nzPiTY|*ILs(GpGvGF-^MODz)l}*6YC`B;`rDa$S@d3J z0lMS+m%FvUC31MlX>%P!q7Q$)0UaXT>wDhj@jTmm435oUESV50AZ7zXck8dls>GPW zXOy+I7v1Fk%vXjN6&B9l+~2gFN6Q^0eYc%=!9DY}CJBIo z`Df(~;AnMla9|5(p2?OwwHW{N3842RB>ZI^bhZJm?TDPWQZ6Slu|h!u&GyYF0JZL% z=Hp~Aer)8}@wyOObv^xRQtKmZcC%NnKmGNh2&S{TXK7J8S%Wj0R8wt*^{um!P|ZFo zx2WRJuATPk22(xh!kL$OXJ&@qlBLj@b%jcw_%}729or+x9JQ}5p~u8yVsJ`U6m_U5 z3-J{n8P0ddhz5uEKl7$9moRwV97nABv>Nq^upILzMfCnF)x=8}W9&k)DKjbGm0al}JnO(cPJY+NvtK>$kWfY~_!yV2HBe2YP-kJHZ94T9TY7k)3;@>d?^8MB; zr&(^dzQ<1nU~j@YPQ;$K`vLkLZ6bbtgwx-3=RN&gNW|fO3C{_6ZA|2LOtxvTnCIy4 zkHEL$alHK-mZ0m}42f?uRj!aqpDp2kWF2jDJ&Mhc{n>+JIAFx88clLr5!=*+x;3}< 
zo-!r$x6Hz=Qu_ooY_hC-g|Y_8E7nV-5#A4>68(|(Nh|vW0U?M4Jj+%7>%e>Moh^ou z(-ruT6`&WLrlz+KMm=lQP$Qz&Dm(u-p*LBkM5f+;nR~R+DoCt^dSHC^TXvsGFJcCX zNkz>Y_wDsJLuk1(iEnHM;fCVMj|%yKA0;Tn0^^rB!Xnp_^0P~!|9<`1?>TPDiiU1% z3*NZx!QmO;IcX5%W}Wv#IYnKj%L$mpwe4)nbUV8sH~x(K?Zwv~z6l^9F_|ucEOyU) zjuCA>8nerT+jmagzY9Ydx$&-9*LV^Yr#by-g(p&DIzLsT@_no#3O%~-HeYcSpU>FD z{bWtvcv!K;jJ0;kHr0zFcuWm?bxRZa+oWXw{QB<=)QVr@R2TL$0X4VSHfSRf2c8L% zn6Y8uCXRPL5Sk?i5OSp|`;y{P=~(V~*a{@gi1(vCu!Ys5G55DHDyGFzee6 z#d5T(DIp_aX+=5wE_+;XxE(& zsAnG>J@#WW2)1;GKQgY|_N>owHL;2WUTqRxmv%eYd_1Pw-FVi*v-}m&(`P7!f7}|I zCC{_b>dOSG%QKnp@-vAmlZyMLT#qtPqeVeH9F9IA6%Py~k1$sAQuM{EZ_TZ4 z&vj`L=@x|GF}E-8=j(}arYd%hGFZY6Kc;43rjBzKdxL>?eqS!gu|oza#2XO8tf;KH zJCs7exYLjB+zS)ibGL9=CjNY?PySUGlu*})PiR+0fJcT3Cq9+@}wi&1B90F z{smg;4>$lVhS)ar4V%J;>4VLJh_nX`px_9+W-r~8z^2*vETUT`j2-g8SOO2p=wF3u zdlNrjV!oPG!tx&#uo2>A@a#-~gkk(1XK0-(;#3OJ2DTV+7srL#`&1#4f#bgSuBCLp zYlNKneKv2Uz%(ZmaHIiN)){aO0GkP1w4OiFdA$(YSGadQcEVPtU;flEv;ES4$E8+Y zal|Z>+TjT^rO{bYh+MsJ^M@#rwsR{rY5WG~`E^yjv*+kj%FU9xn%b6vnGg9CJ%zo1 zpBsi!Cg+cTQTKAgpIfny#$9d&&4frVL69Sb)0y5>#x5={JiJ`Ro-b{ezgme?$l3$N zt#|<)ZP z;j<`xJA*+f$=MdI9fZNu($)sJN!u&hN@U_Vl59iCkSkVsP_7P4Ph-EEBb>GV*PKDD z-372XO703pty}qjPc{B$U;h7ORu}_rVkks>XLP9~5btbUPm`OYIW#C(WXqes6Z8hC zz|&7okR*-DZwz>icr)!ty|Xe(6|x4+VGA2b^03)tZuo3w8E+NP0C(yUXim#X^8Zji z0C#Fzz^2jGVDn6*9z;X7zquI*Oh5|C%flBfDr;)u-$yz&HOtGofJ?RL*RRO{fdt@J zaB*_Z{|W*0p53dfqyVvaN3N<*3RFtp{lAPnWS#*}#0UL&)XQ!CLN34@_E{MiL#*NW z28WNQ(G=+cT3Ap;&6PMJf0~~!6DDb5fp=zM^Dv6Ug+4k?T3|9Z8=Ecv1a%`+~Z8)9!u3h8=cB!%sXUQ3b|H5`gFzwY`$ z#J5cvg4(JFJ>iRgb~F4fG_0lVig)Im+G9^&vin*YRy|Xv zX#LbtK92`6DcE+sON*edR8Cag6eOvX78o7?XZ@wgXb6RT)&}Vc3rc_(kHb&S(9%6P z!&MP@(uI3N`ZV+Rf|ov38)uC$R<8I!E}@id>Pk|dA425Luo`n(59`dOGsv{DnvH)k zEhZT>zWOE|8P``bI+ves8@fHM$*8O<{ym!P`n#SZ3m8f6ZpBDK9=5pEc@nwp&Q7XY z>v=t3mH((dULDxG)=0=5mp&dx#)Py6(itn+2mP{Q)z|%uT3sXWJ2noO=rr|u8PoU3 z%(wkR&`Cq2xZ#a&ks@GQ_-%_%{96{m>q}%jwhJPyVzzK_HSaEyPMAr-$T0s#OzxCi z&D1vvdBzUM1PtBd0dcw?nVT-Bg7|jfMz`@nF{GyX!m(<&>XqTnV%e-JU;2yF3z;qZA$) zQk2d44BEuV$qqTr(z5;BGhvOy-{H8yh-qiObyCl-S;P`MKL-|+FV63VS98DoI?Srd z6m~+_(TpHSZ?BR$6oW7tf+bI!v#`&@M*w_{O_WrxL?hL!{APA|`_sYok&KjZl(hgZ zBdzj|^}ue1v8eQX`!2)~D@PueR}f9qO;FVeYBrA(y4%^xV2{5rF|Q}F+yI`Tg*0m{ zF>fFM6CJzG-KMXDIR-g&_-l4XS33-I=cOWc01Ngw>xr9OLRMq+`7wllv~>RG5GTaJ!q02 z2=m`_i6nCKjcPHaxS4~N26Kj)VCvE}-9tLrr*CE!MDS<08dyEn9T+EPH~&;b&hMSY z9f(;ydr#cfI}qeDVLIsl^Zdoo(1VFvtR24Ph>qN327pvF{KY+T;LVk#_| zJN`VWQqZfp;NSZtTNmdP1MNG6`}AiU?uQB&e)Rw1z5?VnV2#397Yl2J51ax7^C>w9!$K&uFafumwIETJK_;z6O`_-cM)lJ>K>YGlHZQ)|oqwBSd$AGvsc zv^AUKJ-qGZDr$-&q_jH52ju&4Z7=cBKg)t;7B?l>K+$(LW=ZZ^(%ZYQk z##;ys6$9YqlgwW``)XP|ZCgH$Kt(m`8Sgb&MTxD&QIS{(Lt7Gm_;7M7BQElFTQd_z z%fo|*k55>_1|e%%_lp(8RTHl9KN`^HuAJ_CB`W=ATFoV1kl+>0!oV6}4?rgIqM(yB zLTlNYd@~$VN_O#(0gQdN&@bp-Ik%!!`>gsi|bzBe9{igCC*cq_^+T8HSNC1%h z{a*mW!`0pb8pQ%Tee!H@^sAT?|IpCT71#S#Kx4a57eXSiI5-Hqy1IJ&kIs5*31A~; zRh7*e0?q>B;MbQYlR0L);lSBqEr78o4e%3Dn_F5`o`S2ieJFRVr+}Yk6y+-Cp4pLC-#AIlEuSux*` z`u$NAqX`Pw9D0m!^V}~q6hFqE0$+krYrZIMA&itiBSa0Qw=Wf0lK-lqY z0`&!%)S?Iy6?S{ymT00evAc5*N>9r%queztRy1CA070NCYxYAl!9NQ_O){13i_xx6 zfS;Q`*;YiJVcwgp?P?p&PuJX@q-9tf<{4+$+&urpD~RPz7qnS+zUFurG=c?;8a*O- z=N}RGCuwWvRJ0^wQn3Tr0U7CnE|`*Nc`;91!rOdM(b?<-Nwjf8@ja9ajY|+KA;N>2 znG5!eTN7O@|M`0qn}2?Ku{thGUz6})C#62-fBcISQP$Y|W)3>m2;21WPLV*^CXh8ln{%yH)OlQ!f{ikn)mzGwu*AZ?V*-+1v0~~w03p=Sm z_X}J*%j>vU2Vmm`?Yw8Nwat=bgpGCW;v7EnCK7t)h{g*!K+lB`677!@tz@;m|IjRj zXPB^LmrsRXc8X;t82VD7yiLZ@Us05nkdOkUlOZAfXLO)X>Os?V$6q7Ftw6wqb$YHU zu;f+J)WVE>Yl9O1AcKvx4KhskyJ79#$NFkoO&h%4V|GHy?NrLQ9V~bhVx_(ab-+wK zw!-w7Ckv+Rv#)aIB#~0&sqvRJ-*ctcYo__b$1PCc#89ciUJI!2_J&AsI7s7zfl?x9 
z1Im);FW{%b&kJ1c4?tfH)QG5nCJoqhPy8Fv9aU7=gzl{Lv48uTPrA+Q(9zqh=zIob zgTgl9noj*f3HSG-Qk_6-y$F-gnt)n7*1s3JyZTybdt_pE=nh}9$l$J55^r&pUPZ?1 z&cMY_CFS?e5Z-L>2gi-N-=(?Fz6r}-v1-)Rd&N}4F>}9t=3V{jEbF18AGs&rlFfpJ zp;1)m`_NV~=dOB%J1~)3aL;vDz6GQ+9yhc;o)+yETVELCG$$*G&n%f>W@v9f0d^e;HcI1@s!s`xegulO^e%u~lt_#o zEiZdujxV69KJibDxW|3XJ_?SUt9BSvfkZWOa~#ovG-_&@ZkRIJUf^u}dx~hJ!d&@G^u$qD)ty0gIYQ7Hs}3NVqju`FhqT&s!+p4`7H=LE;<&#RHVN%AM4cML zq%BmzT=LSQ^S?epsK|4InEt$==MK7yG}VUtpg1P`bk*akZJYc&E$o7S5b*)FDd1v- zYg(p5GW_9-Wl9LX5=F@F7)8)AYo(F?TEW6gETmKy9FRya-SCmNDZHz8wD0^a9SZYh zH_!2X(kD7Wo2pmHm*-{wZKQJTe8}EJnA1W~DdHqY65dtUpJ^mTvx*nj$I~@Lby5%8 zU@>5=?Jl^^1iekswV&qu(IXsKSEI*r4kfwxI3(cAC7%-m|(bp={o_9gmMFO2ep zDDPMWSbDU*P3AEF7}wZ}^%ZT$0wjO_?(uO{B5vDtIic7JZ2RXscxLmH;dlrOSTs^_ z8XxV6et$qffU@e==_I{@5)eD^v#j8oeS;SU)-b~!49s%Yx8&-eV3XO|S??dtQxxLz zupbC`ov4^~o4Zj9MNg_@wpH{2p(qf*d0#*80c?ac@n!1u78B-cDn++ul2d1>h0~FoSqIYE2Di+vtme`N+=k= zCnDcMjMq>CoC3!>u^L=Yw6g_$2m#5ew5%+^h-lL`HU15x^4Eus0atc=X5N z;i0sg+?JZ`fu*v=GfsT60XsW8KHqn`z`(%yG*0^DTVY8_SU|J8@S*43qqF2YRXNA2 z=&n zTv4kyadGi{`?b#F8orbi(;9bKsQ|t$7AkUbe;f|{xCIm!Tn80 zeV?fJ)DqU)zBQR-&yo*sbgcOm*Vy?cA8JO-P}l2c3?$^Zbq*U}3`a-DN)G*I2MSqP zS-_Q5${#r8;+vccn`{o8ZecDZAd-{qcqulU9E(ojB+$U`mEi4g3JWR>WhlHLh=7*k z0~FkKPoCd%WZvrq+OtW&v2E1oj?^OU@?bzW&$0siK;wYjxvMrr+ZF|Un^?9D8|XNN z4X7C=J6P)Tdc@egh3&7uXd~UFj2iM)?; zJ;7yg_uvEw?yiGFaM!`zU3T(*`|keV{Wmk+J>5@N)va5n?m5SAWQs@SnGCxeyO6wV zo6Md=kWpb3i4kCB*({ZR*z`Sc_VswPdWdkCd!V^~4wZ(1FkUP5u7`UvF%S*fP3vqLAic*! z_g`Q8z75oda4|j{ZDXaA%8Mc&+|qipUWb-kc>-sz#MMx9f9&{CXfTd}!XcjKT@Glj zxu=2Qwk#;aeSX&H4Wi-A(05vNJea~hx8jpaO7OHw#~>QZNZ{wmbzt3b3%X!}3n39y zWuH$xpY-if&>J-DFU@<=U#--TSouv!5lX0GfUVZT02!YPzfU@*XL0Lw>h)gq?;p(o zWC9L+2^nB-DWr-Wm`=Nd@aNeM?x)ek}In`*NrAV9ycoO0<^i?_E;10Hrolr%6NtyMIt73`A2N$ z@7sRwO|43|t5xdhnf^E*HZ2-nyFm%=mmj)+6g{ZxM(EM@y`?Sf^!l)8Ori4KEy68d z3IoFHuqT-9NKhF_GN3vC8FDDzoaGafU+SiHY#r0lv!%6b_^Hm%!qKP>HS`4>z$P~b zHMscWMR;AKUD`?xBT`=f=WThzoGq4^c@nEaeVU~gnoF|LJYx%9Q1+9_h0B7-NnO}S zXjr3*zYsoxGd?V5yj$*NLRyI}tP2bR(`wR~dExW-QqHqxNMH%^ZR4EieS1`AvpLk) z_3!1Uh>_uIbj5u7Z4YYLHl`9l>{)zRpfhr!K?Vf*t7;KXj zo~OkQubKC9P-RC*@SoWx_7(Dn@@1t5nn+`&m3NZtbF}#WcEir83GMgfZVuT1U#sAN zfsasQ20I~9V{4wn{GhGL7ZlN0-hns9CiGpo3cx&8H^az?%E*go5%(j!hiXW4xhU4pl&!2?wZ&y(*i5qPW%q-Kn?YqgpFD%y&WAb~ zhf;c3qtHv9idfH2rbN9@!eLI^U)kDy?~U&pM#G)zel#q7Ec8Q{OwnEaU`K4$oJHR~ z!&zS4JA(Z2{xM|yg3g=KRpyPgSYGZkq`JD}4J^t&ZpfLi^x||L?==GR#89^An%?3e zw~orHDPb$@Nu;FV&9+PuZfF6Q2JqQ3`~{$Sdp3CJ@CexJT_O~Iz8U=C-@*1Bgf2Lf zu4fiz5#>;MJM@(i@<2204ic@I=q&NobCUzaaU6ZX zY;INRg)wiU87!WCiFY+V(aeP`t^Oo2voDatWktX;=yiRFU)X}uG!xrrJ)uHZfh-}f zfswPjPtF>HRDL%uCJg+nZ* zDx4vsAnXW>MbG|&4H@kRBq_7JKeQqI>fh2WIZ4P%l*mMG39cZ;-FLURyf0(FxLSDM z9!k(>h5F*YNfCJq+#Sn({=NVgk5+bh)DB%3O}yn2<8;Jokk53EoK=L^XZ?liY7wn9 z?6VEC!t-GO`-&@^@lgiJQK{U|rbcev7koTD3>caF21le(m^01^&vVt_ETK=D$_}0o zQMF!%FCf4(7{Xz>S)x5*Q`SX5r6v6S-9T?OvS?33%jc(yBns*ue7q*6&^st3qRFjt zPkGk!kmMKmn*&jFVVG}57()$WU9v_;0|C9H!f^Qp8zKiQU1;8K?9gSngDM;Hdf71& zB=aguuF+K=)^$aHFyCb?I?`e0K-{xk;#@kt@a_y7ikPwrj#y%(n!R0V!9WjAi!!%g z|81Akc=os>5tKe3nEL)!H@C~rQoiIsz;G#4XvmIaA+Dw1ufU>8IKTTHxVWi zrNlAn(srq>kWGJ~17K0H&=2S$pU|3s@^NVIzIJI$6D%X#=}1t`EiZc@O8IgZ^yBg) z)%Cly+ZFD?FtE4kJq?Ak>$=%sV$~fh;8Fy|zjzw|E|aR_UXr5jirMbgYtPh8z@6iED3t<+BE%&98 zM-$9iHUvOH<+LbZvWNm|R$q}KV_ta&R_bU}-1`l`eql)Wj2h{*JMG1<2SAPx{bvj- z4I|G~H{4?)w;Hxhb`>8|S{`kQ2^QeE=)`3nXQhg~gI~hrgoM+h4%bN$=J5Jc zTs9lJL@25x>Cd7I#1`Pm%ixfgTJXX*6dRsBr&8uw5K6c2;D>(mf~vN@?ZwdINWJt2&4rcpr$3SLM@2n2qK>zUsi3y5eAtLHOnf0Ng0p=R-D%+6Ph!-7;05J%Ex6^2He7;NKN#9R_PzVL~d8m9L3GKhJbUHr!GU$~djl3c# z8$r35#Zi$+kCSawlq9(lp7tiVdWZtvL!YF%Ys9GuM6JR)K*X(I``b}Bp=!Vj3hJAByUiWq?ZQff%ztNJ 
zR|X3pqOR_rAH#lahP64gF@x*9eXHt@IN3j^5DC$f$2+nUh^x`-LcZ3#?cfN}$NUTn z{D8}(8zg~uw&{Q7(nxyJAuN!_`LJFdG-Hru&k{nLmkW6nKB0qjd6~Cwe{*D0YAKlJLwK7dMxN zWg&78%h?pY6ve_bc3UOh$tT^V<6(1C$0s(^vCwYhLYgQ!sylwILPYvZ$i#I)vVVN;TVu+Mv4w`k z4l-cUI35fRL}Urdl~z9p)=#$SEIQDHT)u`HuFe9|MC=%fm9T#N>ejk>eWVT766{vf z#Daaw#2Af*>A%FzySw_UKcq`U5RIQox`p@+0m5or?Y;$TpksaOD|10nq-gMfHlk+v zh|`2mv*#a-#dA&8*^Obe%J@_0JPzXo9)LjIENMo@IE0zt7a;Dig|q0~ITcB|MqD58 zf0|dsf+?E)>WGPQj#~!v*&^bXQD50VX(x0gY#EzF`X?vGHE^ycJ0_M(ruj zv@uP7690Sr6;cXLW=I)yyRrJ0bwg(>p>0vz!stu>^y=qS+Y9dccj|;vlu!QwyHX> zzlJpP2zAgi`z6DRG7{m(7m5?|abL7zG0k`aOQ2Ptvm>%YIB=u*JS49hS#~9q{VeNi z61m&CGaZua;NRgocE1r*y3CcHxe4^%Z3f2(&%k_1gS3&De$$U)@W6CL?d)&xtp@Wa z_+zKUM?gWA|15}=vJ5%>AoXVN!W@3STg={Vor&)|Jvij<1(=fifSz|2gE_fyAkZ|M zef(zX^4G)z55UfpFfX~)oGvrxGK1Cf1}lnkF4YAkb=aXa2YA=dwRh(-blLww#}z zpP}V)tn$2gH1W;Sk`5wtj)H`QbQfa>DLjA>+$F888LGa@%LEC49SVz!VIi8|@CXP$ z3koQoyCorrLPA2q>|1a6H996{ZyZDf2BI9TkjaBGJv}{M`Q_Ava$i|rpCmLi)YdRA zEiD|6m7vHd&s7!zj?^Qdk&2j7wrm8v6GNVT--bS^i***Dz8nnE=%=HjtDZ>Xx>TVH z@c7TUkOPnk;fb_qntyN$R|sSZ$oz-2Ok+b}7dyx+!IoG5e>?H2kE|~SbMOmn6FVno z5M+bN`}PQin3%Y^x!Dl1yD>B|vA0mH@}>{D?_=I-yH_GH_Q5|-Ux-rn@OOxV_f%Iv zz{B(NwT&uWfGFeh@K6wsR@mf5RPo;vMM z5U)oy1c)31O^l5fsj{e&qf%4xoSmH?sId`4rbWonA`1!%@+vE1AoV5Pi;MUWW0jZb zyHVO~_Tf+=_Y*Ciy3h9ZECyd7C>JsGc9L8ogB2>}lIn7s#9JXJ#OZ~HMj@?RdXNi? zBSic#{Oi}RhKJKdX&h#V?k9^q3ysPdZlE2~G%hR5Oqj=ix*&i6Rz%DiRt}WdFJ%vr zUB!o*m#|hc+fMg~gq>q5&NyK~tP~vwA6ZD35$}aBJzwxXery^Q*A>|c<8IW@3fLy7 z)EA2AmY|KxdQH%KdQ!rNf{LNM?D^7eV$=CkGrif-m{wO$ceB=+dZo#en$ykgzFPnd zt+_3pqYlVKyJv}(`G+>{<^f0EK1a-SR?4(<`~I6Uj5ABl;G=r#Wphf^(~ku-7V;xs zI<;#8VlQo)N(06IXyeL<7Z7!S=-Oul9q-t{@{*eI2|&5UWSwKf!x458?hxlrn-$t|0r1F(yh4sp zx&ZjR*Ju5a-PFq@rjK1W0F-*{=|tQEhcDA=jibR1bR0*5#=jU|7Iv&h$dYH5WwpPIh7R#14IhlZe-sUG29i%w3HAyZG!m z-~VnfNMl7#_nmmqZ#_>~q!wP>Wv+()2}9jGb`UStaIle#N{G&E#H`t64_!&7kg_UJ z?{#ztrPtYnwjz?w5YbCkxDIW_{iJt9s*aqF2^hkozt}Pkr_*TLvp23&ox3EOYJQCn zqrKSMb4!$wQ`Xdsw~Zu&XCRpX!;j~9^F!1nrx{6>*esQ&XLtf0t~RZ=z?10Kc|e|% zd1x{hF2u}c^Yj#10tO*`LZ#oC*Q421G&D+TtaTT!W_=B6UUgNjg|L}ae{VnS3z@{+ zw-YD!pl#6CJvbMX=e05Nu04v*ms6!F?T0NV#25V#Lq39#=qFpGaT)7RLQ1i3}2TV8kfrh6}T;+Wq%%Ox7FnM zI!*tn3R0!KRlugGPl~G~0^jzCxd44bMj@W%#Nu=5>9@}mLnIhg*z5w&zl}V1SimMB ziz3}vUbFi^ZQ5BHCGaJV#Q#H1rM=d;Z{^CoiteB5MgBoUYg45CzcQXKHoPfEbf2eO znO}T83|AWMZX5mf?LDDy zCTxVZw$5Tuba=Yu?F2x`xd@|_XqQa1Fg+``zbH6h3c|)i1AjuJ!rK1Gt|S3_a&L9> z`B%BwRC{x~ytTL2j4P6=B@m=J)MAJ|XB$?~L&x)Dk^MI5le{X{q7z1=fI|AU-;DZ} zxoT);1>3?c2Q=w~eUHs@qDHirq4eC4quQPFZ29wRBJE159hDn|?S>W%(Xmdl?{R5l zMlIgq#JJ`Or>_$4zV+>t+eG#4|F((ih{c+#FZLrBX73I^9KhjS?Wz+tH-TDR(-^o^ zyStc(9iqs9WZ1=w3UgkD(W(1MoMydQHc%CNJ3N%^Mt1^i1g^UAV#7s>60^``I5HlA zpw+&nW5VrzuE9FHAv>ZSlEYfEvuu@1TqJ#Zu<2GZrR{478cG z1=VXVY+O(~r_u^Yve%~i9k**e%oL7o!i_V=RJ3n5T0_g~JSJYY#}eNR(2$D9L#JCm7h)MqCPa6PKxvth2D(l! 
ztH6ODa3^w*6wi@7`+3!OKCuK|giUBk!Vpe~8p$g=Njk8&raV)IIJihjK_FLioR*Fp}rkAfUzYIqt z>!Vk^Nl9MhEnYr-T&}A&9JVi<7kon0y`Ku|Kk;(pi(fi$f90>#Z40W{R$IMb4{eF2 z&Cf}E;?P|)g0ce7!H=0nE;O0M7h(AtS8l~r!KJY3MfKh5&x$hn;?yBtb-Y81M$h#( z26MD4iGIK$)bZ4{^!;W;7u^RdXv1{d0rh6kQ+I7+WYU7|&ug%GSR|oUVDZW2Ihjoz zujj$3Yb&*9hTWl(hAHdR$jUmrh-V!hF)4|}h&87_+JvYj(MQeg$Brm?6rmu-M>iP=t>a&g#Z(2>_^e^f4>fb;#Cpc} z;$h#1%IO48a4Lh0g%9rJl{6m@iOWX?re^kziQ_=6(e!X_;>uqqRQL7aenA0NuZ}@k z&$Jmu?y8!mBvA<3Vx`y|8moVbSe1928qHX8=S~Ta>b3NNKZ~PPPUdwo4P!*cJ?=JF zX5x@->mOz0YaW1mb4sV%>ez^4{CIJ<6ubDWd!XZd%hqVW7G?<&tF0{BW>GqI8dNhE z)exNA-s4=~!q)F$C)L3xAn09GL2I3F!$?$*TAp>>-WzEP?U#9)b40zTF~8{uy?k z;OHHTCZteEZgmj&%InGdc106tn?%;8f-L{#CeOqLJ7TBtLkk}%?$6guVxwwDTpC$x zlaNCWh!%dgX*&H_12wx*D+jWNL7$Gh%xC>(MupU4Lzx?B(|%%*bd{pku_=s6QwQH* zC;u}dZ(oziE}~7v^*U^>-ulIzi7k@Jdi$eo-^4ho76pCeA!mJ!^SzR7xsJ4L6rNBM z90e11Y!`fc515g*I?8D}eyKh^|37is>g`IG=g6+7H%YW%*y8?n%OC&ksGtikS75@t z{5a;hFC_Xsw|JDm{k`X10WnulCK}K0PS>CMPYbJj6eQ4#=cz1z{FlT3&%UYr;I2*A zF`CA?mi*(zrr?dJgca<+JihlB4j~*>IQT~poKyD(>GR#$!0@b$=WFt0>~uR7yJt9` z5K9Ufr7V(qiL8m+L!WZ{kxs}b4*)XGcVPB#(dQ-TLdo1xQP_kIlZ~7;zHCplNdkeb zH@5X;5pnMqIrodFAh*G~1vFaYN{ZZgH~ln9DjC$DFQKN5`=BVK%wPOI*hxaH1sN+a zMbHuu$VqYNCUJ1KMH;?c{eDlMJccDZINtggJ^hwmdGV&r&tM@HNK31N=OXoW3y`k9 z@vVaX+{vv@fM%2>EPw#qIo**bV)jvksB;Z?{bslyZNznFmCQccsT$;P{;3yY`8&JD zx(y~55Lvg_Dmv9-5GIDyJiNAZ7K=upeuRW@9nC(susvS`v&DD8`x#9LDaf$4m!^M*r+L0Y7>UjQ%D2LTh=trI7n4RQWdQV$>WOarK@|jW0MeYdB$G1^ zLDh0#q9>^Z6_+7Oy5fqNoWTR*8v1)&w}F1Y*ria-;p`nv_Yvol zW^YVD5=N+y-SrF6Y7C7p-}3TV5PtqtCohAro(mmTUMYc`DZynrT%zzQ>Nx`yVapZl zD!1}xtT+R16Q0i~#W%PxsnA=k3P`nT3@VxiAB>EwnAmQ5ieC2K1m%5#{;mAtS|BW1 zTGWBB7}|6Uj1sDHuw{E^i+g4={BPASB{AbtVdTYf*05kKv~ zItP8SQ}hpkqmAI44|V7TJRHD_+fLaR#_{F@j!fe_zY)&trWlD50?xY=T1?nnBkA@y zC%$W2i4;;ZSA$pe=K1%-zKhwnquW9`wMDlog(~FXl{HNV}mlGWsZyNE=74(N?lK7x>+p>yG0v>?s`Jit8@}v z&p!Zflf$1D0`p9U9gNreT(>5vF~d0<6MMeqbvXx?Xu7|OXSOZh!a(z zp@r}YuMYEFJE|_%WMgji_!e0|d1C{O7|BtoOX@aXeX$TT-9Rpe^{;$NwyJ~eUi0t{ z0`R(&A`A86R%np9zzU1Qec0%ViX6XkB?`-8V)LZs!y@47;e?3SZm`9vH#YI&dK|O$y)UY>ZP^4_x7QpQTKBabe7(6T%Ah znK{nBT&eeCQ`P+Ww*KsyNcahfoQ9zh<=OKydFVbKk5S%`spX^Y7Re_2gjf85q@{gF zNX8Kyyc2E@vZ__uS$Ka-^$a}HdK8M_YfY|QV_v<7KszUm_OIcghxgo$rVr)D$Aove z_phDr*Zz~w{Z_(*XJ__OH6K(@B&BT8VQfFc3@DwTGJV2EM;H5Sr7DSxjfF&Y@&WiP zYI7%zF;zeUJ)wQ?_R6w2!|8J;+@)PNPe2ZaxM0zP1G~?CWtL0=?#stsKe5uD6Q<&| zXx`0RoEjv@`ur&IA9L|K*B?2zpN!f7VX}1R{>{wOm6H2O`Se=C1Izo6Ru++Stg2*gvZ0iNh>6+C`O@Gg?e!k+ZX_;?yGp zatR=oZ@xHQ(W&HMlkMrEFOpJH|14q|zLgb<+}GCE?_7cdRZ%WI%(4`}f6rL{wg~}g zEH^rZ6*GCh%>)2AM0g0etqW7Ol9_c<@};86JPe`5(Q&G$5x^udR$@y~U-(y;SmQ zL25+nN->TjMRr~C|M$}nJy@^J-6~9;DnPFgNNs>JsqAxqUpLGBC@ zJ5^yxH9O@l5H2!rFq34v6Il9G3-ar*%&=B_jNGbpq*8{IX2!OLdMGd!<+6LCr$_8` z`=af<6XKp^bX475oJPZFE)PM&NMyYl4hRAkQ{u&snbVr_6np z>52PnssIjhn(5zPVjQNvuHkriuE+C=r!CA?myquYH|HCq5Pa;GKiG34`yj;);-S0R z=Amp;`!eZc#x!!@-*(YIp{=Dg6vl`dQTX{^K@_(Y_W30d1OsK&_uY9W?D}|~RFj&T z`h?nUKA!R0Z&IELQ%+30UBG@I6304aHBTbU*TD5s3D9J}_C2|#xp$~Z;tN$m;1Gm~ z_pf*gg@DWObh%+U?=exxj7i^tnWQot+h7(cnd7ZdlGR%Ik2rJsHikrkJN{&@^Lvj; zPUQO}lhlGG2V-Al;^P&<{dr+o)v&y>GVSRZB?l+T_2wI~+PUzS*N(4%YfX30D_;}o zIz;G-1ua*Ea$ixmxAp8MosDoKYBK3&n;ebAEbhR17(Ek)Pp0rH0nBv1j#U>iAxUGA zASJ{y?`QSYckg@u#mJvgBZ0$A{vz}0y$T}6=n;D7dGbj*bp@%Qq@zK-70EHPGV^oi@OFGm^W#g&baP)p>ujB48xb9-MV9E^wRK z^91(ToF)*sO$RMs*hJFR@ej7G&HiV1x%Ar!hYD4GbHh21~8FgLIS!joYdBP-nDjCliIS z%-Ou|oHbYoe#mV+2Gdg4Y}n(2BvLV%P<4Anh@R5_dK8OV8K)yoPPB3sXi~pjyQs=1 zs|D*-po=D*_w()3DZ#N81{A$txYVeXjYZ+T#a|JJ)@_$IOUfc*s(}(jrJwYrxfvAi zwZ*bJvWKy#gK(5Xq*go1O`D7}f|C#KbKZ)9l=k$LB-`A@2zE037-^2Vz>@Sx1%s>Y z0Zxt(Y~`wa9Z_-7NvXI6}rjQEb+SqN=Sfd_^B 
zJq`GaM{4{^!|!_78?|GM{{GF@RjnuH>LUG9<}tvo#Sfa~oYh&U)n2+s{~cTfkY2IY zODAUG+#hCCUfURmjQA4lr7BWG)KtfW#ytAdz|nF9*?tEriC zX7Sp7!*c!M3T&Ccu*Mi3wV3oQHto?izPc6%C8lFJCO!xk94>b9UcXjZ-)~Z^gpC^x zUu|kiF;3!Oi1?S;Ld>i(n7k&n-5$Q-A?;Nx|9F2pO!Drjpqts=mleQDF|?mKiP}wClm)vjk#J1>{MiUK4ef~D zXLDF>V}qQCKZ-QI*)V@b+{}(r{2nUVi>zP$7rmp(({*Fb3pyIZ4IsctB>S zfGipPG+qu1FBPHRthGa5iBe8PBF#gwmI&@YJFPL?cq{sFa>mH3nhOOg61t?22=t~5 zpVoXok*lblb&;YoZn3}t+-sT=Iby_5c6e5X(^E>|91L?GBSdyd_A6m26tiq@QcJZM zuK~0pf&F9qOG1upi56ZMRq5LCUa-p8J&`BRUi0cC!oH>Eaj`k!72N29_65~B=cCk8 zKhot>a&Avrr0pjS==PJfiW8#Fb3xspjZ5{d{7AadGseEeCq>fRi2xc0!NV&fZOe+ak18ok-y~x2ly++e9SM2 z@w2gHw7V;2Nc5WQ#)2vVOgE3QV!tU;CQ(QsUiSqY88_TxYVT(qZZryrzNw{&i~7r~ z)d=$ap&E-I1X}X-{|rL-{!M=+_yC5t{qG#l^v*BH`r`q9$D{_@kY7-MW!)j&+^&OETZiPMfZ~O;pGt%J3+Mhp7P>di-KC0f(&zESmb?E3X|BRc zC;Uv7{R`5Q=-c_9fE)X&(ZxR{ zTqr8FN;PX`#0_rDi_U7qS8|1+6^3d4Ca}x_vB3Mue%U6<0=**OwIHMkmG|rbqP-if zFv-!Hn;px6_T%!`;7L(P>fmbrwuL05QS9k$NsvvLs#abV;W6@hwf3%NH*20R`R9*? zOxjWheUbk|dT*ES;JXH4Pbj=1cLSr&^OU>;*R@6ZRJkJio3a&5dp}z@Mqm-X!n*U2Z_6?t-u6Picxi zS$NcRda36DS=zCA&O2VEgWm0w- z5MU?${+LIqBBOJ~4_SQpa(In+2BIb(p;&xTm@c<{jDP>+%5c}+^DcxoOWwi>rhQPh z?n{Ho>EHET>Y4d$E9lzeZ9*ib6PB-229vnD{g+OoFIi8*QoRf?-=<_KKsS9wWy`a6 z>}nqPOs(_~Bh{r~0mcV-peB7Ku; zVZm~W8LbS-su=S8K%8unt^*3X&FCTrlY5V|X{YNckejY%VPvAA?+9tM3<2t>7)k`{ znm4VgOhMkj+s?65370fTUO$m>#?RH>-2aom;9$vuJ}3UU#!5AVr??GWk5jIbG_%PMYI8mS3*0LR6p4@eXg?eFn^8=oj?S{Go7fD z2B{vyt3Q|ey7*0o99}UL4+%|-kSiWq)~0=Kh#$zjMzCi{cZ$6`tzPz*&V<=lRUnrY z2si0Mt>;+*!H{5b7%+l~25?EKz)H-322Z%Q{rg`XJ$!eaQ^q?j-xa1Bvh$p45UFhC zSeNSwdsx1K92|U>YlyYnDit*c+0s!0{>78rG1B>br<-;f1Yw zyF>{PqH=HQ->%D8u7=f#h!fivEBNcI#+v82#ZsN-*cq7xM*$N4>Hd-_8~pigi=$Xw z2~+pT$|1kU3}!yKHL(6)V7u9B9y2%#Y|sEN)MX=dYIDeD9VA`f1ijDkOB%;pkVqU` zH;x2!G!^}@;&2LRJM>F;!cths1%rkaftB}^%%Oj1HGp56J$wbRbl#%h%cc?$o|B8# zZ!FxKjK3b3Mp(FGb?+m1wpI15^T%6ULfU*I4vG~FkdW^C#UcBaQi}Sq%R*Y5f`e67x23q9fH3E@2a)lRUk9J*lpRxNSo8kecrVQF; z?n|t>jH4>^$!)Y8+?@7nsZwvLvpH*$GUCSR+LK&yLjQ zsIAvqu}Zmqsylc&h@L!hk9g!>DZX31(-l;_yDNRZ*=y|L51`R*;Z?Z$hRy-0`%XBj z)2=hO3foh@N~-zX-mk2y*v~ju1bqK)X}TIF)e=Qdl>im$8ByCj>abi-FB3yFg*u+w zt`4HWI$WwNVc5~{@cu1@m(T^vnrlY4(Ubtwd4#V`L9Z45iGzS|Y{dBpA zI57Z%@W2>{_0LJ_YD;ER6%_PAz#Fd%2&uQ|-&BS-k;Na>(9m!MnaNNA9KnOJb);!K z4(p6y+u)=D#(@5nZ!7gy5)IaKvkL|brK%DT%mFq2Pxy6-cnBgy-82+}?3DIhWoU!~ zH=<5lWIW|wh(&z)ATlE3_f>n7<8Z@#yby~(&jQ3)WB@4^RD6%?;dIDjl@R zrzbAEWuUj)v3i>ZF$B({Kbrq7GmKgEaJsNjs?pGT)ncDBGaPqGu3F+wOu%DV>VD1m zWVBIh7PXwGEw=4p)^4}L)37o&f>ZwYcN@vEQ0gWC3_MR?dl+aNin|=%^6^d;iZ%63 z;d7ja$yFD0LfHr(hq5`E{UZTk)dTn($%cpd4Fq%kbqG>n@8I}&3~uY$!uRU+Bh;{s zEAktVKY+t@Xs8LpeJ1no@$*u30Kg50p4WCU&h2>KYCPrNg<&X!#dk>#MuHrZ#3|&E z&hSEH#Cu;S>JF!v!CmYG27Q8swLVP|9aaZzGdDqGYDyrz(Dv5gx4zlcvi5)ezlxk= zM*?q+y@9P*f6W`_`t)=qut@{Q6 z>_U+N0V9xCeff=o;uhVW(3{h?MtGt|UvfiO(pLIYd>Dws9&CK4i@XQL!qh<0dqYf! 
z&`v7W8wD}&I0M>)L+dS9f{U4tRlBU9Ga4Mk>-HK-MLdZJb!j+36PR#adXl-Wrn5;k zi{~37@z|skYJrPg7VrgoI&}ggiKV6v<9E9E6i5E}AD?BWG?mdtPmhytA2D)lGNppo z?r8Z8p(L0TYAzR*`a7V5JN91?+x+5wkqO#rDj6FMiNu&!U5BE{oSzupwu{{Ga1YXh zQnBc(_W6l;u#UGBe=q$)Mah^e5-V8;YBnA_W@zPdoI~?AIuK2e&0sJA@2Gj^GUJGj zlT|MyMzlbwC{n&#^dI|$MM0fX<9{9e zC7qf4(XJ0but7a1+Y#fc@u#V0BOI@_46v_3dbSTrKWmS7jB%m&hI;CF;xLcpWFL9Y zPI_n7Z33T(5P{dZOTV^LnT=}Zxr4vY9*AZn*Srylu)$`A6r{NgWry#)eu7gfJ|ClJ zLUE%!oA}u@ky~vV(98PAJhX@hc#7L-C{M(pv7j}trpEQ@XZV;}7Of8i+YtEVbm^Lq7E>=_x!3Z*gH{AEYVc;BnDcez^YNK7KseoDRaz zR5VYILcuDDFcI?&l4#W=#5-(v^hw8Ct+z<#SMN|^A^U>s{={kq>UVcNn90eDvZ!M=<@|W48XUG_ zWWNqwp1s0%*Bc%EWh`xn5AQrIKCAOs`6lR;ZwX~!)f;w2;soDzF1<0x+mNS>K3a#n zK<@E~%YGq^Mj+a1sTlX0yhed>@-i2xZFecR4T4{V=|rXsn`bz^ON=MFpWyv~xU@eS zy$Qg8r_u-Ob|_2_)z5O}LNgNordX${7&)UCYZy8kBH<7mDHh_9uGo%g|KmMS}HLlxI6W$MtchG zp895c^*utT9S@^P-=PZ_h@l4J=(Ln9Chhh5?f`0Lve>$z!B?m@)f*F=5L%8^-m&KK zW-^+|i@0&n%2lEqPw()cKNiBP;ptA_>?c0CS_z)qh3+>e+S=(eR5F7_cka(&2T?lS ztcdN5_639fSd=I1RxUTijc=`7cICr=9Uh{b3FLEeAuqV`HmaFR$4Wgp6mCTWT@x?W zG>Uedl+^krQg_g1n&4H(!So4m{+&)ajd+tR<4|Xzx<}h=X&_~GU1s}g`s+6wf5vvi z=#@x4Cvks$w_ktJbEy=-NL3xcR>t}ojl$-}*0*lMDN@o{_nyJ~%!>x9cEKB4yK|&5 zLHm*%rFvZXn&z{bnl=k9xsI^68m-9Wb|T(qPm>mN*8d|yIIY=KXwO{lpv{QUXDyV? zQH$36lf6lUor5uwz&^}BUMnf_XG<`cmuHKOBU)V{aq{R-pQUIiUdIVd zov}VIuo!5OHwo)5W&16-B}1D=-kizQ_x4wU9Vz+K4iQspLt*x#=RB<^hS#%O)WpSC z(U@Sxo%o{zM(j--tO&l_FXd=Wv+GT2F;^L;* zF7XoAVc1#Cw$Sdm404XzK*O;-J8$3FyL+>C_cS6&ODkF}mGcYeB^~^d96Yg%uoskC z5BZ|j>8_V?+*Ej5?j&DtqhLZ^mv!2e7#>F6zmXMd#Rv_{?7Cg;*=@lcepW1uKV3`} zEu(B|*Y8bjaxN#cmXTCG9ht|dS+_D6DUwY%HN2Xk=yWE}bCPMH81*-EM2g0Xblr#t z`FsiJ4I7kBxGk18DGQ`cGd!+|e`*z(xx=1vbL&KfSb;sm`bXAzUUj= z?soBz$2x{`Oo&T^v=xRrNngd?Un@-caG^i>JTmh^LT_icaQJEg=x@tyUtuE z={CyDa(~jrV8C_hdhdH9Fc1$CMofp|;?W>Y2}y@IuEb;kqmY_^X%Toxq&`PrZQyqt zdnHp6L8j0k%#U2U{Axg<4t2502_#pq&?Y?|7HPsrQ))Cg)Vf0~cu(r2!#9rjIzy%5H~9?O z8P#?VYRV*j_w0E2WqYBlI{(zw_rY^1PiwL;_xmEbnb3MdtC5`|4Fx^Zajvk=n}d1W zHMyB3|B)bYKD!jj^?vLHjnEQS?%x5XWxm8)qc%PY64O2YQ-6wSsLNIEVAS!l>0u{; zn8)=QFL*SV7lYou*;~>=*!;apRT_2(};kEFXHNn4L^)O$0h51u9`xOMo+4@8z933}gH-$whk`xwL9bpqK zkOzAthJiCs;eJO0A`6$u=j%$^GCns7q3k9bxD+ICbp|cqgV|zKy?uWA6<3uOq|5%9 zL0rbc%Q3g~u$fhRSCs{J^h`S&zMIWowWy*meR~|^PJEneso)cHG1A>d!n?EZyds$E zm=nskOOd;i%-}gmB-}8cQer^Fcb1Q;QEB{xNta(rY8)dl#xphYoR{RejtXUz%j3ro z?kMi2{Cy=(00p*H`mydexyC)qGd)DfwQo%1W=!Dhym%+xBhB9mdasSHB$l#GAHDHi z2|>MoiHTYQ(z5FaRg0`F%tOxlyo@*%&3%WW6K&0`+eTc|TjM$wV$d`Ff6z%eRw6AI zh+6Vp$38h74XCnU3{O$+xmIjMV(9SJ`G=51N2L}{N&4JTJvlvD!H2EE{;ep$(BL<% zum>}XD(PY{pIA@(R}imYLh-w~Feg6o7B-~g9D{xNy;pM8HTO6qn*e&YM3=ssqs8gO zr8=!br^SM`_uga{xsNp-cFF|vmIIq}Czd6<9z8j1=cy0hyK<@sAXU6K9DMp%ufgJo zBBNk#>Dr@RVhc@)%OXLzENT->+D3|U(RK1IsT=R--j<*fSCDZ9K3IL&T!wN^t;-x=9MVit^-I^R`FENrCV{>tgc zY4s=x`?-pFPp~MKmqc;GmJDLUYeU!J9WQB%|8u5!d29glPbc`&~WYl z;{eS5+ z)-m0lX7e3Oo*|<7^6pIK4@8xrj`dMUphP+Qfuhk5#X%^=rjZweHfbO?7DgG$qs9tE zP==dfo-^I6444sWSv3Efrn8MZ+bb|3MwG|HjU&1qv1IDopgA@p_%3|B{Du5q68%bm zP5^H@-gHv{I&TzCxXHS{c&tp3uS2y2_#~=kF()JvbCHP0N-}15r-aKL&%U(3<=GLN zj*wS*r+?{Mzg328E1M9v&7v({Yd5x5fef}nSH4Ca-0g32u+{<1U{z<`dO1q2*Gc@u zRU+1ARlk^nQML9iv!x;V|XZJ`Cx`P87!eL_m=uDvu7Qvtw1sSCI?~gi6n?Dv*ttYN#J#~!x zGnPFCH9eUDc)b@k%Z@zlfwM;bnLb{Ly?o1BR;#g)!@~_v($IZfYz$mt;{V+hc@)}? 
z+XMtyi}eS+M8Cw38VI&}5}@ZdO=gcvJjc98VT$YQ<`r3^(Fyi} z-)ZaZXyaG8wF+llEc)C|Qr&_B4Avll@f{b@mD^td+R+Ht0syx0Kqw zUO*a1477-cJI7Ij&9^q1j4D&J!5L5C-7>tKkl6Mjs6taHn1xdmho?WAAtY0?!-aGB zy?f2;zR2!bY9FN;^zb>ag43Af%iMD9Vd(8frsnQo61QE>VE8&=U=iW=_Zed>yBZ_$ z38d%(I20c{s%2eDV|zA?FFGm`42>46N*=~jQ20=Na6F>Q>?sNtF;Idp?Y1hA)dgYH>yJ z8^Ltge?)7`HCw;qa?N7Yi%)CTIN)Y@C>VF%aUvDpqh>YZb2;dkwSU@5UZ}fmu2a0g z*Q#F%RN@@+<;+j(G{Y5aLPm8PrklS=xuAJ_S1Dpt^LfU=e!*=jkM4w`QRKg*gP2X9C+}4T?t$d$nMxgvq>I}>vtFkqkwO8X2se&1zUHHB zMxnn2bW9R$b!MhVO_qdALT#TXYIo?h_0v{QLE$r{D%kk=_?uf>Agtk6-`L3d!J#2iR@Qicm{;d|pu9Vt!z2op?Az>%z-2Q> z04Rm|ri1Ya#DbX=vdP;#l>khQ_XUgI5cmL)`#J)iT$Qz-v4|;5!nR#dP*H`C<|<27 zOVK(zJC}0N1`Y7v7=2Xy`~-ktl~%oi8W;&%z5&&4Z;*A<|Gob(0W&dfzeo1|XhM_W zWCnJtsld;ev^BR7xJ-MF|Yos>An9%ba(HFlbfxlVX7gwQ@qbxM#QjZ7-+8np}(X&i!52>*(QZ zIY^WuzKip19WVgs&N8z1efbiOL6hg6tJUT2cOxDL0Nm}q56XOn0L2nOCojdN6%{Rj z?QF1_SMW2O&lQCOGU4(&uhnZ@_7qy(Ps-*hP~x%DoRyX1c@;<>=ztsI;W$)d<->;$ z6F|E8PzsY`1Qvs%qvLnBEswb5hf~g#X2(j5Ed+1WUHT9YS^=N1?dAZ->HzA;h1 z5&>{~OiYY+G96Jb$aXOvxI0RXUzS3Xy*GsofS;eWvs!MfeEfP2uvnLX+0U;x0%(@0 zxj7mkVN_UHSo%9I>lt`DIyw-Zw!g3`n9Y3T4=_NoeyH=bq$VOVpP!4B`urIC<>Lz;>XZpb$SH7BTJ0ljCf&))MynP6^~cAI--A zLnh^Nbq4dXOlf1|JX_6pY$oMUkEXoSJ0ax;Q+aEy^j{uD?z%`skVvzZ$pGs4gut&^=QmwU3*Ni^{OBI7{IEoHG1F_iwl*n$LoHCF1@ef zZbqir;|}S|?s&fVaPr~rF5pbp2E6)%;)V?HDkJd|ZH5+L@+zQ$*d9@TSyB}Cx{I(5QU-31#E~F-scw|E+hK{>h}+>@vC-C&D)l8<(C4H)=M#yc=bMu9 z=xXVt&kVRH+vAul0qtLIMYWYW)CEqw@usBGnv82nG3d0T4-xUEOLEPjH`eNfpgvlW zf4>%4LHJ2!vqeUtEPj4vDGVZ$d3~j-Z8Y+Ds>Ql-c^EYMaZ6G7rPDK|`nY7}g&!d5 zP1<&v#&&j<(dgKPzyWpv^y_tXYf6TX7L>)2e)VVJbyA9s=8kF3GwGWl195BxkzLfv z-~eT@MN4OAR=|L<$bPleda)E36amw&8HEc@MbX`fVmr4X)l&K2k2kY$Mh=0bz1lkg z`D4sf&rCZv-;Y1WOe=FAutwxca6H_Ect>h?CX1`UGMla=Gy*CXs8O`KYnl=_qP^^5 zj;vCpFqGh^LVBmJkVd5lZT4tp8VOybN+#@Vc*-oEPcq+~WwL~{E3vBT*npu-9c4PD z&^fAgrcx~`+-&H35zxF*Kk1jRR6mys6I1%^t`g|*XA*Eb{_!ROsO+svHQT@w{;+4H z>x0*jW>0x&3*@eHEu{LPD0WgkFcm|JSF%6Hd#AdwHt z$yxcA_>*_0Of6$+QOka;L@z^|qb#4x4Q_I&(>Gf{YRCPOL_NbTzgtmaJ5BAh>(dVF!($?{?IeWS;j{>?^VY~4V53Zvem{GoDy+8UWjk|Y-o zYZE$V_-ubk2$C9phBR)#yMoBq5Ac)Wt)M)F2AL#UcKa30N`bGukO!1(2}TpKJ@3S7 z)g(L_SiO34L~Cl+GNpPvvxsM@4N?jk193JvDH-4#P_M22^s@lP2c(aU%I?i>qHv7q ztqB_c`^c@?KGOljwpBC^^golGzvue z>Tu-OqzZ0Zmao_tD^?pZiGel?d8E&Dn_4|z@Y`3LDN5B=wvWF_DKHuEU;KfS2{ zZn|^!{|@x^iQ7G27H{I`W*7qIGQyXLS~-L?TkpfE z;jqpU(ao~*2sBrQVu%E^8NNVWryTL8gE!bb&|DOK{bg!IEhHihLVd2~tMpn*);bN^ zW54~oQ6=IZ#WH}EH23JSp5s(#uNHSr$DZ3wNIQ1chNk*Da*{2lD3e!&b_J9Apbqu@ z@qXCEc)Z~XPXf5swwqt7g%u()}rwg5+7jm0+^7I(aQ-FP}An&XR-&kr)bq-DRTNIoMD2-HnUG}{z~ zwTUX9o^a8HJ}pCr6L7JpYEBl@9&m<5tosMPL8!*^uwz2^p4rOjN-U@&ow}s>tW;v@ zM5|sdx=LF}QxV$e80M^6sg&?p{6$ntRJJjDW-JTu-(lEl;=|#0gE*J4oEKr1tVvDz zx<8T+uZ1svF#`j3wY>q?P6T{TIffAIzHgiFJx^D77MdH z6IEhbn4P_;|Fk}In~M{f5%JnbV0Dsqlj>0_x;O+%JZ|7sqthtJi^mKf`5tviL9^tD zLHE0g2EB{idk5Z}(PMAqDZJ>6$at>7Yh*d+4}*q!^TzMrC${E3nJPk*gpR%TxAeE+ zAB1LeyrN}mvyA$;Nf2qY8^|R%mwB>$5fwxHXBgDOFAr}MbH(DKHLIdY?^drsFjp+4 z4`f0h+|gg6IZe~vO6))>&ULdWxtE>ruYY3kxaE;IvcbV?=XHHG2kaiQ4Q);qHkd5%Lz8cWs6+*oTmj zk~Wx-KM+pN*OgFG5fuQWg7h5!$1nr`2XU|gbLZ=ekN@+9B!}P!nl0b(=5$7b&gY~= zaoHB2Rvwr~*%1}9O9tc|B~ac7Qi93trOC+)#n}qo%ZpG8o}kZ|d8(z#OC)EKL(Ttv zeN;k`PZsBj+w2GQF&0R~g7gNW*NCXt!(06^Y(PW|$O=?4YHVbbd+SLR7rF-vc>H28 z>S_+%kIVS_wm-5PJl_X>kch;|UBi2woHHKJ5nR{z4qy69!l$f!dPyy+;*pBa6}9@CHd2-@AY;id<9xVUd^SNLYO0X>*2X9t zi$-oEua8E%oxHHVpGIjrw3T+xCEp{ z9`8W{R2JqyUDiC=yAHTbK2pv(pLPew%bwe+j*y%@CmYD z_V0V!#}A?Dwx1`zcu(0}6ZkTAW{qZR&F3UTM;Dx7iS9Kwnl0byV}r;b>qH?~3-#77 znsPqNG}!2p$6$H0Ap&BKFaFta!s@4e1^`DL#|CU0?Wd=qwL?SIC|@93`YJ08W5Y=V zH4(u&yFy=$0QA+`qrb#u+s%fP{VwjFrC$hu3VWuEJ5 
zWzm!$!>9ObRl89u6+ycBz(U$GcNwST*!np=8F5{na>;V&PZW_EnA%4@K+(tu!vyQ zLEZhkuv2kyQ9LD1Che85y2pZoaUCJ8%RNb;iie%AHqaoZ4LhX@(iM9!my?V1&f69r zxg4yx;tr(V56xcwm>+j_-cjb;+?w`|_jk>TKY@IeU(ku@67z&Fn|h z)cX0z3akG}Ehyiz(qd#~eV*{KoxdLDD9i+>Yws>2Bm&Dd&qqmVhAiAy>@KfZA!{l+ zNlK6iXfydxnXrI-s_LacZ7B=Db6U59R<4}HJ_t1-*`8{5gF{yWSNU$w;Qr!?HNy=a8gW++}zWm+Qze7cGMPqQ&XR>|;V#R9?Q_ICvZ{dth`+Q{Z{M~_jx94f1+5(&M^kt$+6+&YC;Fzw; zmREG(d%e`Cd}JSSX*k(Ya;zb8+49yIW902|P1cCrQc%{A6cxFp(<*8{Rr!!4WhnHDR%KWf}H?0Y*3? zHuFf^Sds_p|247}SF+uKw9{A>SJc;*$e57`)hxyhk=4wXPFn`$o*8gcU*ZKI)DsB=*w9ixJv{kqZHI_6OQfTUUS*XiYpU5=_YFuIj_^ zln*s|CNeTIvi^-wtDqD}if0^Y;5#Nvx$r&{)MA<-szTZM7CMGuuJ+i(OkbnD^hhS(i@2QW%*Zk&>XQ6^-OV<&o7;P{FfDC9* ze20xclWYrz#~#6xlrzD4F&Nu$fws)U-ioHTkB?ZlGTs57J`<Z&X{#K8~|q^D9(+cW)k!dY``5sgJNni@cPvCN+(YLpz^jy%yKz`gp!hyCse@6 z>eGm;^(1UdOT*3Lb<2ChCo=--Ay$;4;E>=FB@)tVCqVCN4Nn}l!~=NcX=-VqVq+I` z%+K#c3~9D+XdD-m_>*UQ_xJWX0?>}KvhqyRLs4<@+wdLC^<{%(BmZc=8XXfe2n6ET z8A=k>&`1HQb{qLyw|i1ntS=|?)x;b&bEbfPmf!zblTU&)PU@1&<800Ba(i&5MCmsm zgS8Iv_v#Fd%*xE1xi&Z+h`?rQ5Cvo=1%_YI1^{#L?*Q1wX}j=;@X_pnl$8j`_yWRr z%>j@^84N7+m*lXTNvAPV8)RKN-qs0gAO_wHCpB#ctm)p=Daz{uBO|IAixGKqab%Lf zFKGeSIUr#H2n7aFDN9}2iNDWD+Ibd3Mz4f@*B|R21_mLN^X~<>vVA`=y{>yhSYAVp zb+V`1d5%Y?)wMLTzdU~H9ch_QyD~Wq+H0vpPTMtkAcJ%hLtierfbWbeh=Oe}z!4uJ z8@&~mV~|ZijaWEw`tS8O+{M*ZkI*@D6?(?S)p{klAcjhZp+io5YD?Lf&0eIanl?g4 zRDBz3OqV&4JOL@UD-VoJqjBpUCK2_t{)%9vzn=DVksaR>2t8Ny3XJa5ks9u4C3alF zyFcnkCdl#R*>PryY$md5RDsY|2+hPkdo9g0nEZJNE!yUYz*C-pb~ugylFb}j&rP`w?v%Ibs1DxiFWe1 zT+g6O5QE>WNHG3ElB>#cE}m+>!xqPcB@(eXCzhZ+{6bG>B2NhbEGp%qDD5Wa=W~T; z!&RwOepB*RT3PXic`+LGR^*5^WoPYMw}~$5WA7pnkNs6hSU~ZJ+tzeQR>T#(3z|IV z{)L$^%IwT9m+YQ}t-HsP+r{vbw)9TBu8P2D|SJy zYuJl68X3G%`1#YE{ruMZxegPNLqc5Lr-q~+5HI*Tc!LtDRu{?id3x!V^eFb>!#^QE zjj-vlP7GsGh~a$w&M#0mWWUb{@4A-C#i(hcUezXRp^Xiex;hWJYDMOvg!JZP-Bz=B5(jhYrn+J241Ck& z8j|Vi42p~K999BTVQRxz^}RCKS>{nC>K(L6`1^*IC9*0z{MA~8f}=}!J>aQdO6Wzb z69?S={8C>XHDW@hPyq_d!t__q8Cm&KecA35^1jRLE0YuCG{k* zmH3mTc4X+J~4fQa7i0ZVGFRP~2 zmus_zoQqNB(j}JuFR8;uSInhF5i0LZb@_VJrOhj58{<6ZwT0GI2R)>41DqNx#19qd z+YY|h<}#k}G43?5Qd@gdAZYa2fcJVWP%uY4oGIiB*l>^Wzo!j$`qS%rWQ&>%8#P>h zs7B9Xs~~L4LSItde@T;FLA5$7T#Y1}`F%@>aIn_IwHk$RVEQM-Kav^A z>^Pm#)JCKI{hC$+shwN9b!#VI--YNp;;e+Z3)ngeP`ES3t>#zM5O8EvzQCPdTy7Yy zOHg{S6vV{0R9|w3p82!b5~3rFq_xlo^6_u>Mt_W))QC1JR={bjQcRlbu$=dX-}0eN zJ1)?w5Gv#OdKW|gcgh+e;@9-dIG#fNVyghCoV_3)>i=t;fXZ<98=kuLFa5LZ>;1K? zQ&R8if)sz3GOp9GhhUY%Ol)^;wz_#s_Yx})UddzzlUq%X(T|%u0%+qoB$O9nth9-0 z;gLQJ{It9O?83a!g6+41X9m%|j1ELIm@8ct?^SLwAfTb7~w&-aRaU}|xP~IL*A-O6CLX#;qNsr;aidWYcZqdYTL1zWw z4LBXC0`6J&rDO0|wvbVMCW<3|G>MrD@;pw2zs?8mC7P(ix;2^55?6Uysu>wVcgJ}%CrX!W~7f6m@HGKEDE@%%AxgY80A&`%97vaZgWFkyyk+!kLR_k09P z?Rt{dEcUdV+}t_>@x!EmN3KeGcWQ09?rCRuSdNV8Tpes)eD@;RyJ+@u{h)LXylDw7 zhm^6QnySiDm9j7B8WD0=oKN5=^Gghkv2r@Ot>**JG0KSHO~2=&3`48>xCoEx8mj^p zm?C>B9V*=`^WDe;gyX81gAcM;XF~Fi2T=>xM{=umh?7V2#9P9@acx;+{rw&%#{Yc6 zJj7ucvO=!9qGJ7XCii@5dUoRGOSs3nI)W+xbCRtp$j(Fp;_hmzmICVyQ+uHhgnghJ zsnpx#f!PS-Jw0iN<^$zogOoAt;2lhM)guwYY1K}R=B$<%u^38HW3TKPPg=qLbhr+= z{-MHWKGBx{=?E<;M$y>G?fdCQpoPsC%EAR*o*DCaMSX>@3!PV7U^s1~s9z z6Y$Gi{Ke9PD>qPSNAa2cOwwtW0ZgKam`V5w_bjtvnNzgKOVeE=BJA6PGJ2Qy;I%kWJM6QVU1VpnOPJ0HG-2yIQqFPIBF5_f zNh_@UwpuM}uyy!j>bkl=Cm(o8cCwPnDuiSx`inHiqQAyQD{u&Lc|u`4{NX6m>27&! 
z`xenM-;_qJm67yZWUQDbZh@WEIDGXR-u|_UsfV;y07AVr-uk+~xDI{ZN>#Ru@=I5L z=Sz~M$w~J?1onCTf%F#&^vE{1guj^P2&bir*k(SF=?4=&LzM5UEbSrVOtZPZF`qok z#6RK@J?Ts}OYv3QW+`oZ=bRUIOj@nRCle+{&h|!QPA8p#Qc1Sl`>BOw<8YDl^vd0@ z@i-%miUe2==MqjCXeaVs#pWbegimgvR%k{FeiO4+a$C0o`;#EHtVeCba(F71e#S0t zzzGMb>wLTm8r20oAx+|?N1b<~ z^W>|QDXH zJ@r&jUZ90Nq{U80jcyhBSkb-p z*mypBi(F4ATykH0iZ#j4$305wOw+!@DTlFnv=)X%x8-$vROg<6zYAA>d&eiVFVuO{ zCQk6!Ww9G5^A|Cl(n4CyeO-6p;RM|4T#D-m+t_Pe`jOm;LbvAfbz}A+lG&v)RYt_I zW4ddcKawk->bB2|XAeDHZ>WW{mls;izWB4u&uCLQTxM=3kQ_q}N03flPSKv)0)+V9 zZ*k|uXSo$6pq%z7oaO6|DDjA8t}L#EA@@vg)i}pZttjxfxTP;NYgR$p2YGg$R%_^f zpwH`<@6Tp>Fgedyd|B2_aywHa?XQJ^WFHvaTWb182|exvj@L>7)*qOr)`f4(=NUdi zaTY_e!}sqfo0AxEn*unZja)x^>$@lUhELO`)BT6R7&hrJJi6Qo|~0Z8vN%10JRoRlEK z|1pRV%qG~$GYD@8L{#;pp7)3uqhWzJ+eEvTQjWqu?XFgCb;b`;L zxI%g7>=t_8Cqy?K2}a*c4q9_GC{jga^dqewVvVG2O9n{EX=K7pRfN4`YiI1d%H2)b zA}Z#euW`!aDh44x=oe&@&A5;$l+V?ht!cKhmxw$h!_;WihPlyYEz~G3?qPVnykNfJ zn`p>T@MMK9({7-ya(SjzWLj?{Bgo3OJ-Y;d@vv>^g<_hS3-{jy$#0fgw~d0Oy6@+y zsq$Y^`Ay+rqMld+UbW(9TCHNyYJWZqyQ2^u4i0@xz_B$Rsxj)pv9b@_NsW_RMESdz zcr5AJ1h0J-+GmI^m*j?!U6;gc#}H)G7ChM~59spsrv%S^z)3%DfUL0}C9%+BUBm9b z-9wFtI)RcS@^0xqRJS$859T7{z0SR6Bv>$Tbi;)IVjt|qAZUD#vCSRjT89lOCeqd8 zAfI9ZttMOCQx=Z_xp33@c%ufJ?@8J~nN)g)LXUm&^;$c+KYH-p$N}W1dOnxqrP}QJ zack3E*qvTb(#2Ofd3okq{qQ3e`Bzk${<~V|S@qkGK~A0>m{7Q|lbk`yGnaHQ+974f zy8E&IE+Ss!`y)4BlEpg`(~y=WS?jov zRNk#>OQ!{YrHx0=+!buf`;&dO7WkW}6!g@1KP9g*uRRv_(pf_MYM5iUWBHKi1i-tI z7t*<7BKu~N;4D}<`tJ;#y&Rbmy640#36rRi=AX?Pd`$*0k8REy{Ay7sCf+=dHgI@J;4Yi!p?@ zU#|&s75o0GRXT=Ma>6(=qsjDd{ParmVUXR`8TIm5^ePYXD{+f>-1GlTBsOy)n(4_W z!<#09>F=86*cy!GU5mUOrQtU(-c>BA7MJGoaJr<_yAB#-G3k%w^(k=kF!UXnl3I}c zS~xQm*`ke_U?KRi$d9bBGNUP*u@xwbv zXEwV1oZO+^&uT^}d(I#CZ*nyMV1hFx68k3iXPq8?KKmxNsRWbRH!_%!#_Wy2SJOyd zSEJzmKCa<%2Vz5O4FTTq%#f|-7gyHQU&8>tc1nlqAe28!>af992?zEwcm7&DQ#U9J zPIEd=Em7>yCd56cN#BvJzq6u|P2t(Va{d4rMH;$w121j44cA+IdP-f>!WQ`A(K}Jh zrGVaUL<)1Au2NX^uE)ae-`+W@FP2BK{F73%{`|;YxDS@xp=yDGKc>660%l>iEP}uJZl5o(H{*=1&1D}pTXj#s z&{DOIk1}Z0C)2bd0SMtHhMK1>ImJ5drOtyRe-}+RT_PtEH^=u>`}*OhNLQB34@MGx za-3bwoCCS0)=;%gj~sc99Lt53!OE>AF>K*`%AJZ(`RVv7&%C2O8;>-IZHC0X8?~Mk z9b5sW@CLaW$B3igc>FHcpWdIDLn-&Gj(U^@TU zY2)#ZMJGpXu^@D5BHSpWu9S)b`(2@#k7xI+EV8B5KL58j#V&LaQhZxy*GF$Y>fS3n z_#S2q;-$vub=QF1wgU5M?QEoqLDosTF)-+AkHun4cbAu!*>9L+jM9p4NGhhs;|aLR z>eI;PYDW@M@zZ5{p`Jpz?mi`zNE{Y@F81O+wW9*-YQx;!R|Q+1d~K+S=cymgE?no1 zqM)q;o@YAUBWd+%95z^E!WRWQ+@Mn+cEK%(EE?%{!2xm8ae(B#fZiwGj9cFPu~zYs z?#hw6oUyzUIG(gjlXo%bVVCY^DFFo4Ig3_m(0#ng>~cp}SJhi9tlJ0=M`i_=i)I(W z4<6VazZl!?h3+@UKK{FHU3aGz?%)=b6_j6}!(A9hTkcFef!ow~>m}!XZ9!|4T`lHu z>Ak10y^5Z_p3FVJcIWT*;i~5%-4j>6ECgOqQqE3()rIk1?*GQ1?TK;QTF3+NaFGePzb9P;BjUfo+|oZq+shz>gVISCB}(}XqFwX=I!>oi zRRFL80fS2ey`O*+1_n8ikbgzzNuls>SZ+Q(8vr5)!1#~q>gsRse5c=&^O=74@w6%_ zz_EQx0k)a10_b6YIZlz)8*ZN(`7t{?yTN`nJsgt`U`+@+Ix-&|R39b0*pPW6;kRAxhHU!joKsI08j z@{N{azz6hh6R`U*HS4h6xsrdN1PECunJi}H1^vFQ9Qd_MO`*ABqsEo=<;gdy3 zNh`U+DPYl|QY429C?M@rOTPaEOi2C|7Z?BktJbF)pRTkVGEWYBWAr4%qFt>9dO9_Pgv7AzI!tGy(W<2@+r&kZcB6H4*}Q<1iF5z|913 zL801|zzFHb+h%>`b>wl{48)_Pq_k%EG)ba%%RI@d`2Vq(DMOb~T3cJ6uX0`szUPWq zLohbagn*#o$jHbUswsdi{^@NJDmp`grM4h;5_W8DD;kXO=AHXSF`nN@Fr(*nY4NRzAf+fgMpfdG{TD@$cUs9X@x?kJktM#elu;ok%7h!1LRDfj()gU=j$* zfb-%z_O)bHxzOD90d{WDE#^|G3e}s_2m&JF>?x9)tLu!5wC|V=@BhqnfrNG|m0872 zH>b_fU6HYmKat~|oSo|)m(WNy0JO z;A01jL0a98(D3le2x)=kKEhng#Q=2iau0*NF>n7J4>NwVA&i@#g4^3$>#Xm9LmG_~ zzt?4;bOL4Rr^5YL@~+D^RxF1gEHkFOJ^KF*sU}by$h2#X0S5C65QJnIvMU5w*5-XG zdvjf*DcMfE7>FYSiCzIB7__i$#VFu(AjgM|tLj-}+l_>R1T+0A1a1`*cswp(OVfs4 zi?OHv8IscdbGSS|6;~jR`AT|8J=q_Pis_NWM>Ee8u=@eEUjeOaIUQUHYgqd)>l}U4 
z4wtGW^AhFa5uAeT;B+zKls87@^lEi_cK;FCL2unvgw!e~+bYD*Utdsx;^O&Jw1hZn zp#(YVMn6x$77=mVoDN)($xH*|Z*_3V6VOt=vA@N#ov6H7pc#mUxdEgxeScgHz<1S> zH-f`Ftrl2>j|+f?N3XdV54tw3h3zDtr6T%4_G#awo-@?M!rtj-g@;kMqucIP%N4M? zye2dvU;AQgPelf>6odK?d{)lW6wZ(Us;$s#3Ye1a(?|<aHa$)uDP%)Q#$or?WM zGz(rxLO8Jb7noHH84F};WA5xA@+&%%>`-VQ+4ld1eI?;mD7>>YQ*DqnHOtFt+K6qe zL?i5i`)9y&2CPzsLTqqtRx9pt(!fEj(jfqIeCMpSqDS58kJAxX3X`tf?5l@2Qz{vymGBz2snj z+^XBQix8NdVEEBQq-OC0o3ICexu^r}KlKkQNAkGUF|E?(bA<6kCGJ0aZ(MQt6J}p( zSCj##0gETVvR=pju=uLwtn6Mz5l$w@zabg^e*29dKjJ6zmR5 zuxP<(is6AG?z4D*@e$B4wEh!O=!}ihJ{!3K4XJ+|h|-wWkGwM8h&=^xB`lq`4V=T& zS`Oj*vg7KFvKX?}C0yn?fv3Fdz?ldZ0_>yD%RfcC1RIEJzl@^FQaLB^z$_z}g`_VC zk=T!`O1K{U`}LQcF$?ubVDzE+w{7}N%Iyi|dhyz&WgOv7fYMvF3l?P>!Aq~FJ?Gx5 zGT3~67#5>H5zaroZPrt@Z)K|Zqvydl*H3+SU(LF;JOtPiEIZ*`-TKs-ioq3C+>&1Y zBHb?!)0LIkdf8|RkL+ie=J-|K(^=#u%L9v22d@SC6dLFk&AyK>1szYe&3@-2CwKa zaTQJMdO6r3atN|Z8ioEbX~zhyz4@76nvZ&Vt?gh16Cp70lj-Yid zm z)Yugq_?l+{A$!=jM#Ton$sLsS0=u*+OnQ{oXU6odfURj1>}Jg@TtL6V%q(%Qalj6# zJ7X*%l3(fL_!3UE+w{=Apv8*q!EyteNuz{|08Q4+a6eE#=iu?`rR}(6(P57{k=%aG zXsR%KS@TpD#WvO~!l*Q3>BD|I)M2J*AXnyK#PaVTZ8s3E#m!+->397Yo)G)X%}pW+ zcL(7h8MPAKT2ffHI~VB(4g(L)qTe^4^WqyKTDFEinCQ{cPFp3%NvhH)mhP(C@?~#z zui$I^XmZGLfhgGs?h>UQ@ehJV_>;?^Pe^O6Q32!E4tDF3av8q^1<)tvIgIOKnvXn@ zm&~?71s;q7ccC5*!P{&AauJ8|AM1E)z3=enxtGe~ZN<1Lc<#dA#piS%vP^z16TxzS`W4NKs|bC2`n8pS^dd zvs)nv$CcZds3ntiCuXfT_~b~)U`ugagaTME8t8;|Q_BPyUMeM%>suZEDG zbgS4Q#v$(|pRu=sEC*sas03w)`37@ffaj)ULbJ(-C_2ody>ylBitO~7rEtWb{mmER zh%RY}tA`UdBknVYy~KS2zJ!l#CN)GiyvwY9ipTh~psKc*zHd!z)3?^z>MJW2K}O38 za?1e;{l)W^>B47idC}93pYCX2F-jNV;Kq>y%oOUn@{4e9+AVv!zz>8ITfbQBee1i#R%!W44v( z%cG!CdDM7O8BFbi+nk)4kl$J#(cf2Plo(_cf7m#ytXU%ZS<;+@*%{hRNUKtE4tU7x z$8UB)MOKxLKFO*ouV5G)KQ-iTI#48n+TUq?$enFpn?j7&62EP&?h8$ zc>mUrXNEknIGFxTTrF;f@`c<#KaR0e+yDJnQ=YC6sCrsAU-7ADuNl?thUE?*M@svA zQ;(-oXIN&jUNn;53dD|S&hgJ!_8__|GGn3fy_6qx~B)5;zcYr+T_$G70TuOm54A zU$y_)=V;@ef_f&UV;Osl&2H8;> z`gF?^T_D;2b72HO$bVSBBK)U?mct3?f7HJRc*eT1Ka`_#oKExyP@%Ca8(t+v3!ID? ziVk~*JND4h)}(zwwop2+G5>6k0KU@YD-QGB07uldy=}h3v~YN&2_(5z$|om;ZMg(X zkFq(}?Ql8M!(27x5rbAe+}TW4rA(EELPn)gL|g{zg{yL89>qy(BZPc6{&W zGgl+p#d&L&8LTd^kbO1U1z{WE*t0mL1>by>W43Wg*xJ6I?S-e>!*A&xik?C``nM{? z++zrO|M~T!LJxIyZN9Cli$W4>Mbx)Nyk}ms)f05X+8+Z$kckuA+>o(c1VXPg`z)cC z4e2!z=VCj2ay)-W)}m;xA$GM_yoE_lq&~q-`SR6^{|{+z6&1%CcI&PrBqYHhxP{;j zjr$_FyE~1$yCi|&60C6u?(Xi5ySp^*?sAIt|L5x5oIUn!QB;qv{;Iy$<}(k&TS_)v zLtt5IEZIGBtn7Mnf1?Qnum+pAJElZR3Co7FRAy8hNfc#&fhwhDYXolBAqp7D(tA)%ux=7InGiH_W)!?S#+dik?0dl4@rTD_|_yG zSWBB`SsZ`E=XJSObZ>o=i`z@r{bcZPd(67u@5tX2U^MH-yYKYp(LeJoUad5c`M!d@$%td@ z9e6^wWlePd1u=p^eID+???MCRUuLh>t)}_^iG8HLo$Q+fgtfqD#ed(<_hooy1=Utn zWxcA2_5LT1bvxhuG&3^;=!D{d$zzNENo38<%>mW6a7!Tpw^L9&vC=jrkTtLV0j9p! 
zx3)y$XjI0gr??lOM`oL5vke|!T+*wW+=9mnDj2@$a(pSL3e9LX} ztQ(O3Q~1aGr*{(qZXbMd>8(YXUe6m@Mvn|Y4U7&D#|Pd<@p|2&IyyQ&zZP?z)`J$s z?I|I%mms5N#%_0|KX-J%-p!9`FU1A8Vn1KkwXN^&=C7ClZa5-9_`Le%^)~pumP>%$ zF3Rk`Ity?d{dIEJ7O-y+&U}UG4_8!ID;x8AURxcZ{;%szGGIO{1C(s;w&L}u6th3J zJzXc?-|VFOii(K^j*ZD5#R5f?*R7m`6ad`Ot&ipy1fg@x7H$^L51djUv<*H9%*2#j2t3>Gp%c zobo~ccIb)5_1Rsum9n1$Afy!yt~4GrD8Gb5(ffY5!OmKsp-QNmE3ILi!XFyBydKaJ#IZ|S?>Tw`+6zxbtjaHA=rXR&LQAbKQ&t4O_$H_UT^1n(H5axP7y z)<$~_dHj#?+6JyPaa;Ht-O*P;O~6%tsC$7+m**?|Jgs!7Ns~1;V9Ow)Vb32ooualy zqnzmwSEx)=cBie1TPhly`NhU0tYlSXq6-2uGyge=c~q%uAIW)>Gk2XRbSlKdUI`0?=)C8{^3OdLZz?cv3NO&z#(usD)Wd}(LSd#!>v$9Pgz{0V zVH|eMDpiL9qP#`iY+Vjd)Hde6gebfLNb+p3kt078{v)><=Ouk5wrk}G;{4SBLI%_=rak*&D+ZWdBeQy?kGu$n(<3kQ# z=~PG#rIkHbj=I?&m!a0FeQPX)1oRHv?5J*f}$0*ON6H(V{g6j%YI^KmR1?(V7OY_Q!4 z58hT3RzISYD`=?}=7KEJL?6civ#XMlxbIileKBp`=AeqB59M3YICnn&UTT*L*9HFD zH>$PL=8`mR4I4QFh3V5&S^(uUM4{T>K@dGX&YMA_2{TnKb((=3oAKCg-V^JdjneUS zTu0Gnc|$cqT(WgoM+S1K0<0FniCUkj1hmYjSa^dNs(AzI3frV}Hp)Y)qPAa33*!}& zgBfkSvct%*Lt6c{FQF8BL0zqxZw3u@cRrc(Jjj>Ac)xdIkBM+4h9tX3-&1+=?bYL| z0$RLt2uR6g5=$ zGTX)nd}u8x@W4!l46UZbg)zd6Y$9zQ_L2~4i8I3|b#y0*;YL7^XPob&;zM72DEDbg zO#M;~pv=phP9K6@lZIho`5MJ=#@!yt>gCKJ2^N~jsU3-3D-TxIcB?9i!NzUgB&=y? z+ygH~1?ueE;clZ8a!b{sYZSxv_6kDR9mzv8Q&W|E0^m2KF{&V z8H;R_Z+7Au*bO%@`0LW7f*T2LKUXJOEq5G>hBaJrK6OWz*#aRT6Az=g&04Ukh%yeI$zB6qLo3B{l{0UoHy|vep+$-5gxdzLEh^W+j zP#h)e(gJ4p5SHXY_>s)UW-P-oRccCScv=zcE<`E3zJ9ShX<_uUAGB^TQkvZLo}%)O z6pfYu5lUUpo@em1D0v)WJ3*v!Mw@4Ykr(z22^*D$>6a$hq;6^gM_j1H)`ZZU6Tb+d zi-kQ);o}lfU^%TMe{>8Bos}Y*($U~<8LeKb^wivt(~pm5#u>tJjfcN_Ea z8Z~|S`kv#gawfwtzTl9@S )Y?7*(mi}>$^Yl9NzN=34t<_qynmC<)$VcnWb1t| zSoPUkpI12NW!s(N%uKxF|K7!vgETtqnav+%DP&o@Cg>HsJd~ip zM02vty%xTWCfYWo4;A)jWZT3|&_u$)L}t5^=nPuy#CMM6xC_Z~%pq{cE9>i%DaPbw z{ay)@NmQn=H}*CdxKq_A$&Og|yqsM=i>1m(oZ+Mkj(a{4L+XegP9u@H%e3Pr=LTEa zz9Tt8$cUxj_I-jac4~VDI;P`xUgyB7hOYh+NVTJ zNutpu+|puc&E%ZxTeF(}j6VB-jk&v^<}diFaJ;HbZ)5b&jFXY&^^P(GD{}$y&GI8{ zm&p>@zUx*R1Gn^s8dJ?gO5VgVcN^4u!DPWvoCurqPHo!%gdc4EJHi%A>F}^5%Kkh{4b)s- ziI5X(HNHRQUVB|4dnU+b#G6mk^PzO-d&o8RUpv=T)Ix0h_WfHUmD9X!j?C{xpWO6X zlcmb@63Y7A`YLfsEmP0=aRpG)zYi+vBw`jWb@TJ}&E;^VU(UwH@c3YF(+`Y$L*IERn-NDeLeF!HpoH*1oFl z`U@teC|w?|b>B`XRyuSVOy`1mf~&tK@bq(3uMW#Z%ej>sbuwM%zPDf*X-ct6ZEvUW zo=d25TVJ8(wf=(ZT289&x#;u~#xZ#wYhO;)PSUZO)=_h%;jGKTHyRT(4+=AxUS#6g zW$2&P^Ho!eU7ZilHX<_8qt#v7p0a^&WY_zp>FwjPa(a*a7rKdy%9hcv7YM2ut(y+np9NbLBTnHH#N0&#J zV$#T0j``l20<3=7K@u>S1U9?37dXg?)!S%1mg*2>RPu+&9Vstj1<|Xl4ZMAr1OlWTRQwk|#v}KyYQX(tLQq&fmEVfYaDB;c6 z4Aj)6s;k6*)ZnXTmKmHiNI7_4=?GJ1WC<&LM0qKx`*Ye7y1QCTRzmo;J~tv9oMVpQ z^KddFz3iz-3k$W{U;MJ{Ln|+ZQk{J_XP|z=!OeYZe>MGvGhp(b&+WH`?VaPFHC}?{ z%ACD6x|EduOJi5hdsQ`~Ti=(FHg0L-n>!BM#L}Z=|GT-U;*YW$XHQPRtix(pQ^+mM zg~{NOQ?x8n!Jn)?_(a;WIG}7@eMIS~M}j>u(a@+Nu4P4QuELqe7UH!+Y8Xd9^JKEY zxEb#Z;luD8tG|&(4pGd6bIp~(?cWX0i^I@IVb5J2S7Pv=moT%~w2&Tn1V)it5CdR3 zT+Sq6BDF%y&f{KcWqo0$C%^2Ss(G?sOBXI$69pk82(pFmSeKsEaZT->e-P>n>@tyR zvb3z38&c6e#Af;*+wtn9TA(yIInlx(+v@8NMvsRS&s9zGlWwC$7a*1^UaewFg;mYT z>AfR$)8a*T@bCJ+ybaI_OsF!k_rJ>{rqrf$*3&<8iyU&t5|<>LIvVXgi&6RvKX=Sh zv#c1l%Q@!(u;amKOtk4Dkt-UN0!0kUwGAOAGTG7JupV}zFyt&A&)X;TkL$Q@1dr? 
z7z~uGbxK4Ss13Q|RjwRetWy+MVF)R^KD;AUv?ZsxNQa%5@C@)<*701lM&AWJO_km_ zpu47T+Gh+?9K4|}%T@cXv9m>X)?y|q&=NDmW<1v*C5e^OvQN<%VH{*mY;qJjYA@~3 z9d|%RtkGZorVFQbkAOqg$|!T4@p0#ct;6Z~gUew;$co%Bj#-^w&|<-o`z<${C*&*v zKafLzvgt#ci}EJld zyIO*Z>4B(Jc@sErLl$6icDqiB-sQp!E=D6Xxr=eQC^Ye8q;!Dg(>l_Jne+9(Tsi)V z*Whh%RN{i}G$gpiR8tNx`y}>fRg;m!q{Y0xH1~N)dnkV6m`uyxF}U8rA27XoPy|gQ zZV{mEtn?JkO7xG2$F4_C#-1>d00EVMs9HlQ*5uZ|{h4yyayj1CV&plfc~I-plv!!t z^-l6qT{-WX7CD>la(gE>kK)Es!pzfM?q2MxOuOYf3;XdFH*6?7-RM`8;fsjk>MA!` z3%PW@c{#wzl=pzYUkw{e?LQan_&0FDU-j$*00smTMy<9eYy zpcDVEROIe*@<9z_#BO^?V?X7?oQ_9fB=Pk3M$mV(7-ki0{JS}-fB^$joB+PS{ z^K7fUxwdq$RgV~Ls5_Y6(I%bv#=$ggh2zM_m|vrF{*BLqrM4DA$Me{DJ&?Sqt7W76 zeEfwAjg1&9?8xsbH>L~^L828Xb-hpFVerc@(2M`P>Z~uad88fAQ*9)|)Z$Eur~2f3 zx#AX*%(bWnOkJPh&&E2mY@>Nx5#dT;a}uVDH|FD=e@mbqUIbZ)aB}e9T8{0*4pr$q zRdf92CACp{(C{6@z?2(7H%Vf|f%La2BSO*XQ3s{lPOL=G5YjT5wU++*;jl6DfS(RO z-LuF~MARtuu(xb;8krqmjF0~9c&|-SofSS#w$!}_KP$nR++O39(ZVBalM=G*KkdJ% zzI^n<9S0wux&H$2zT|RunfYA+CEU|*NDQj7@9yq)pjk?dgDmGSkB4k2BuOq3J0Qsl z)P~y!IPbDkKB^3-F%FWJik5;s^boGi7R$w1#c{Fij*khYuh2==57^RFogXS`=RsM`E(Rv6=G%p=4s}1IX>2+7V_j>^CBRFrtR-iEaWB!>V zXu*1TZQSvcEB;w@2C~VM_*3X;&XqcNPQ`nV)$GG@-bHgtbBD~Td04f!|KNc`_xyP`z4wBdIF7 zlSmAP?jktgIY^>-r5-Df`6yo_kLc0LErV#ZNddp2s@7yBGlx!@Oedq-3{N-8g3U6E#}HXp zEp~7z>JE{BJ*c3_Wy3H_tVYuiALu=^i8jDlxVgBXD9Lz&n_2futga1)Oo|t3Gakae z>elr)38gq#U0P0MwsNu`1+1@?Hl!?t`p|=Ayf+?FE)sqUIvXzpTougDz%4FwB@MLH z4+jr}V^=i^@o0MGTu;?acuu6^hEG(?3*~11qWM9>Rvdav7nC8uH+?|NSY;C|Wi%L-Bm*>UxI)u-Qg* zZaS1{O@o!ZjpikRt zt|X@KE3s2F$5R*B=;qvya6Cn;b2QR5+ScVeFO>k$v{;njk0Kl2WPVATGPxM|Ps`lGt0 z2G7wI{h1>BtKNqiZvQ5d%|Dqo-#1(gRQM$=TzZ1l&{w1v*c^(XaPQCHOX zd4aCjV`!BfSNX4ut7I0Z19R&x%nlvd!|H6I$?eM(I=GkE2lIlmszdBgZnC?A%g6ZR z+O260)Mi*h`#9z7y)xaYZX&xv+=iawJpYo1=f{znFAhYM6RggsV*)Ecwl-`wO-{l0 z>#W+oLPIi!SUT@lXe*aYZ@nG2iVy2&~ zw#Ie-Od=b@bX$T;!J-Cp-yqAoS)j%K%^$Yt@@2gfze#MY)LU-*R@Y})Uf6Ch#{S-V z7sP}5LWr@)>&8jz5Q}d_q!%H9;VqpSVJ4h!*KbTGntD%#Tg8zK4euFnDXXQ04%TbB z_88ro@Lrc1%CDA6FI_aU#fw6%nO4abrj|+CD;S}Cho>XskDBzkICCofAUzZ>tdq_! 
zEkN+I8_a#_4U+wAviFboLh`Zvaj=a;dq)sOxF?gREx|ZihJQc+Iy#=Hx;l=1d<++H zBZg&Gz@@7l)H0lTcpsNW!?D!7E7IXw`5y*H__5sSe`-xvzvMTsiO%6kJ0*|1?obn@ zt_~*@4v!s~b!t%SXA{79#$WV_qWvyb2otHW2l*SlRfXDzFYJWF;}xR6ioOlpKOUs#>>1Z2g1H!+%8cn6_a$KM zmqbox;NPARusufXj|S$95nqM%jnb^RF%lq?Rw@ z2rSsf)|!fum4zpAk5yijqI8Q=r`4)oEcg-ioZDC$Hu&P8t8sZM=`xzQtFXg2_>HmV z;aO(i^R~uw{=ssg7I~i~0cUwaCS1`I&dHHm_R{VXg24G2&X^XQ3I9&r+J+6y;;x3W zju3{Ep2>D!NoYSSp_;ct zY?5uxs%^he-T-%g{P&au==*O6BPt`J#2a&W$(sqK}g zLc^|Ck8e0IBGCR4Os>JsmXs%+v2RN4uMepiws!08w_uCf+_-}Uq2@`_xAu6!rr$n( zMkJGeXt{e3v)iBGA+j@}J_uI3*5Jz>sO}g~Y0YI`se4MjC7;Dy4894%saSIB75rK+>a@#lNUO?7OkE<$eOmn*FEa{nxZhPb8!kf=Ga;3f5hDb@si^cR9 z5HA{EZd}`yW{m;P^fT@L6;hOk4QI}@#Yo3E9WQcWcJD{^;-r$+xN<4}1Z=qpAO#k9 z@J6<4pT%A!?d6Q2VYE$uM*;&lkA)y2W-GlRYXon@b= zdj((+g-z+TdHnQn18|>&|A$=vkr)>bPqEHMXCRh33pq3>XiSsN_o4$dBB82^70;+g zK|>>nS)D&XCIsyb$lu=D*%1Q(<#!LOFC)M+!R2=Pmz%e2&$#cl1(l!MF|2NfTUUf1+ye-uVV1CepKfLzJ z0gBeQ@mGmqE^wR-@AYDE;}at@Gpa{2MS!{IBS76!ctAl-x3;qSER6vW!5m)^a}+>Z zIUn~7Mc}c$tnGOK(0t@RKRrL30Gc7l!lv&3Y934gM0!dv2Icw1sDRRIHy%)U04;XP zP#Q8{34RG^rra1nBrUg?rn%|`+uFXe_j>_$S0b-FXgG;YiN#1qN2exKmHEHB;-<&R z$;ssV>@@=r>0R(*V=u4tgs#PI^>GE=_?=^i6Vi; zyy{2jwCm+t@)BlesbgqIMiP4wy%L40SvHy0T1tuZw+*zHGT|3TO}j`rWJ82a8_wPg2tV zD|Z;Lt3Y+=3P1vWLBO8(bv+Ib4NYO+1uu#zP`<9P&*&S!vjL8_svbUNi0TDx8!hZm z?Fw-8l^D2{vM2pB%=3;j?os}s(bL-cS2n6ki7UnK1|Nu3sQOXD0zbbgsv~agpI>if zNspVNBRnccZ$4rlhuW99r#bz4w}@>Z;K+c0&1g8j(PM$T9^Hdrm%tg0S{7co2|&#>BGwi8vn4mA+VaWzi{`e~)km)m2|UTaSxj}D7yQ%IW}8lg5Yb0Pz^_c(IH!|Dw9$0^EXOW zF3pKLW1#^KHmFOE%l#6wEpmjV%ev=wX(MK|ZRDRbK&hpS8%e;W$iw;q&{>D;N9P-s zsaLE%PC|+eqIyVx;~IGAIaUn5o`X7w@FQRknLg)!ML)L4cBOS7UVMVpJ8;9L z11%U1-;8MMW7zGLrx|v}pPuP8Z!1MI7S~czo=CDknWgNWcQ>4{w&b{wb}Sl={YUqU zIYJkSc@`uT1~}L{W?w~?80YT3e;3F#(}sU-Wy`L19x12o46&BDB@XQU_DJAHc8z&C z<&6-X-Dw1Q>LNiBb5f~uQYLurH-q(JM$uwKZR(Yo&R~!N`^wV9P{9^&Q!!-$sFj2t zYGb@aCS`@PJ@j+565L2{QY93}=RG_LK1eWF4*VTe#x99yG~$DiARQxCzdImZQ?w1Du%oiT}cUy0#RZ$vp%r`zlMw<}3Z+kSQX`PPcdeP)PCuU-); z?NplVl?P~NP2qaVLG2GTvw_&s6G@h<$ ze2y>ftzYMzX#(iAn7zP5w09wk&s{IRo;z9|BgJo!dx zN7BwQ1K3Uu=gFsHXO12)YF?PJIgoprWyviTL@Qvm7NLQOp^2^f{QZ_n^IwQZV$E3+ zxID3#S+ar>h-BUBX>!GcQPFCWgw&4hugpT61kbueP07ajbuZ%?7?ezV(OJkt`cw`y zSgdoM1cOYrl{qebtU`l}I9A?C@5wje*ZwT(kJG#Dd+YTGzmcw(AfYFf9AXLcV4cN> zF1?%QrK4KKlKopp9&^*T z;ezg`=`36ni0figTHjc|wy_bC9QdxWhQG(`kU}=Dc#Vz*?0(lF8nDNfXKwk9j)2^} zexTP5I^RQxg2G`~OROK<_DN>=t%iN^seYtuc&;f1_z@jNO7nM$^(SEu=Nw%^`mj{L zVD;MMNe*(yog^8awmAGbVA}Hd4^i@+~Pik%c~p zq_j%hS43{#)WL*cZ7_&+55KW8{F5vePJ;}YNI>O(~9VZrs&L$pAIG* zL%mG|_ehNB-1tQNot))BpQ=c+t?b^lrRW4XYT25cjM6hu2GjfTFi|}sfm9LgA_)cg z#;0nG7G^9(A{>A7?U^<_@3J?E;ZO7#QT2-ackDn1+;s@(2YJaj`AVVe zx>DK6p48(hhJF#Ma-yXp3=W~bfk*3*-Q9ZhhMhO2vk&_DmePF#dQ@o`g#uo>41)pc zYpA@bsry3>Kbpf5Iko0lIM?ev^dz8J&E;lbf4_9bm9#WB4-eRZX{14sz#sc#&0kMm z<&k_{7YNjFi_U+2;P*CAo6;ODlnNOT_buX^5f10e1J*i@Z;@(@@82}YZ^UoG!E?fD zl^dooVS7=({qJuK28uMLso}jlhl^XR)LWy3X8qHo-jRSi@ZE$>v{8wkKA`#52k8)( z8OtX}I2~I2%E@|%e0@E)$g{R-6@ERcr;pzFdy-re&(0b?DvJ_iYsy||>SZynvmks) z$B~RZ62ecraBxg3TDICrBRdB z+%0LG^`Av0v^c&n>*|+K98f7B4+XL=k#e5gn(ExG}W+<<@3SbKF^a zGt(=^Y^lqO!TSK);ELUnoS`$G?#1|YTH9sN!KKmxvrJh?l%6_t_rK%T-`O3~zWaP7 zNn~fB_YI9Cs+f!Rg8;BN>{k)4a5YVGG;D^)VaUt-${7Dj<1pM>guT<|o;v&Gk6~z6wPhEZ>DB0xazoLEW zr-E+w2VZ#D$zqb+qngM)NK=B3W#5Ib2pjXR8EMPmnFEmagC3O0&BD2wwIc?^QQP#b zc8C!cz3%soiLLBn?VbV5K@CW3Uv$r>Y%8AbyKgP1?|;<^0#|;+DN`Ru0)CfjUT`pQ zt3ny-)c&KY!le4hO6+hwFh#>C*u=CU{CF!KN#o0Kp=q93_r*6`8x zsu87@87S)93+^8eiQINQtvap!x++^b9_DFt2^Dab?g#|F%PDqDFEqp2{D9fd&-J`N zgf%ZCf~CUKL62sw1~k$(oJbF|-#3=VwifchOV-a`{zDW&GBj@={@?&xY1T^N!|Gk* 
z&)uy%35bd!((&{}ZGHaYx(R-CNxDK?nY)?j>dVF-ZnF*SIn6ob`O_Us0yUeu3-&YE|s>Rk{w5v}d63IUzAKAO8}ieRn8EVgG0o#jw&~`H}TV(zUH8g=#^Z z>xF~iD4eoy{?{>g01m!ujxoMBT_A-W%?W?+w&z%e2CWCuXOreg@_Rm>@xwr@U0_Sn zGAF-Dd6=yQ4fbMrBN1L+H5u&j1;mElR2u&v%db>w8!v!1y2jL1~b!6yvbcHmSVW zVw&U7U|C9Ng}h6wwW3U(Eis%l8c6#DFj2M>+ zK>n`awKJNAj;Oc35EX4&<^`Q7k!gaoqoJx02B+ zm07P>DiU~jeB23a04dzuGHr>5#L*g%^`n$Jpq}4% z+tswxu;t3;$xm!%VLwkmAx`Z|_AW%KM^|={pSz0c$od2k`rW7xGmX$!>fSliXrVQz zc?;I3cK$t9MNDx1BUTx<++=XT9ivmHT(tg?`TA%aKyjKxk=1irthXsbDK(hH1Bq0X z=APiIc2guyGo|YI{j;%<@h=a%zdbg(_ORNOSd4xBy~^3>^%j{hNj}TPlkR&iD&Igf z5nr>0i$aUU8x9w=tuaEGJU8}}7$d8x5FaJvW&);s`8v721tvmDdL)Zk^MKl$Z@SYx z!lwpkAgo97W=>3r)ep|)k^KM4jxV}?68U_iy1r0~x!h|-PCk^tPFRxQyi1aYoiJYM zt=Cznr|A5%PqW{xD8OtX5?~+DYSr3+O~V9Q5S{FTdHPh`vo0(XjAn6UlE3EC&5JHT z=&f`)BXAEQ?VhD6#)*#h^tVQ*x>FAqs{fR~;mf}>H~j!5+H8%*&y#cM{&zJvW1RYs zJG#etUbjIjaeFLirez5yzTB@QnIo#!1G%dUyUN2?5W*N$74lD(tkbn_-&rQ%Z0R8h zRK{;6z{Q_#u78Gp;@Q#=7o#BvGmydFrUF|tHK*9s+F*988HR9ry!0t6!@cj9e^p2kd#Dou|`ftmM=9(WE)#C)c*5b+!wuP_Xn5Y^%}UhKmUeBCnrn2JU?Oj zzK4SY&Z4j-2PXnmoQW(k6pQJ?-{Ij%MQUZ+oBc6Hrlz!2Wx9<{68<02zDd#>q4e`} z`#f?E#xv&0rSpfBg@uRDH@jCY7#YXcLG0fEc68v4?%>OVS-B{HIgvU(HT8vLLr!eD z(YZjo0R(ja+_pT>zOAqu5%2BoQBN11hcR#M$v&qV)1fxoen24_qD=Sd%$A6mB7fV_ z8~CTuuA8SPEr^K2X6Zmn>mwW{4hdMW;F}R0YCb9mE0*Rv^zF~j|5u0Pzb-#M zJ{p1`TXdw^bhmS_m;H=}hBn~#CEfcP7vP)I0~iWevTdsZq)4@7AevliO6xNmG5mMv zFGcWr=WjrMt`}tw2ti`g&*cLFpFt&C5>zkVM=cPW&c*q;aFE;1pQyZxXff-D z{h~6nx$+EEu7VVBFjETPi+u?a4!UI_ zG3^8}T550b;T^2?=j+ z@9CVkKmV1#j-n)Wc6C*I+}hsGWuQ-PR)9uDMGZ`gq>(o^XFt52(%uz_vIjU@M3eMF zp@sjm-~efggG!WXFct{wBit4e7Y`&)KRR#!=JZrP9>TXm1;{EaMWE-vJ%TxOSJ*W^ zh{Sm9>TLhxIZ{O#>E`zGiLsBY(}*d?aDwyYTcns=_H-V&q7un)U?iOE4p*ruoTtil ztI$Qok$nXXz6~uq<@u?>!6gw}RDh$G+5ui|%9y-=Fd;bRH(Q!LF0W$z6En6tGiktX z{<{etfe~#rM&7rxjxk+BgnW=c^S$e{GGP~I-OSvnru!|*ys*BvwqDx25)H^G*COlV z>n~^Xv(tCa5Bthxx~QvCgDSWC7kImz{#A0%L~vUnz*7JMLlL(UXMenTH_#3HJZC;JV9ptt8t! zNyeblwSV`i`vJvNTmU|RK7!mKulQWMR@yZSwxg#cf^Jn)7(5;U@t{VBfF4z`uY#ne>mVA>8#KOyiHtGes%z6Ygc3|16r-2E`6L&a?JI za`pBkHLgjvZsk}u5?Jlw&>jFgkd&;@$B(C{BqEIqOY_``|LSRVq6|DhHr1|23H{op zSvRNSXMj9d2#GHDFlMFTZ!xJ69hC}E?)|v?nvp7DTrzPQMdJ>Wq3TQLg_Qkeg-=b` zD0SNk-9|AX4zo2awy)`^=^=q>*v?!JL6h4LiR~791evYo>g~G@st>!<>qWMJJX{j9 z_4(e?1K&BXVbRLu=t%|r#PB;zQ9Yd?^rZe{Ci1aC*B?PRM2*gh4T^2WN@70iqmQfU zqkh9(X9T=KbC+snCp`#pYmPHaoz;vjzh=chL#Vzz!w_0wLgs2?G2P4Z|3K#RHk|X;(B+qy!Z&VKL7RW>XWyN<53!|HQ)#3cQ~<~sUS~HpaAgA< z`xS|t=4!CEXQlY|>@LL-s;KnQi(k5Oa<0rBOEG^n9^7M_zSKoc2F;N#-|K02`qvxH zr+7gEMrp!J0!{UK?msn+o%bPH6iLlT0*&V`#?W$)hTC4)- zQUs(^@&s18W}~ydEa{tO^H>Yq!67|VSb1seoTtD-pH5wGz%`eO8iRhg^Li-B&kU@0 z8^m5(D(K~k;Yu=%^ zA+au!pxV!d``dI^^w-oTiCR*imG8B5J=k}W@rsGXBJXzR4~f0*O2}ZNh`89qf`5~V z*j}CmrfO0Lurw|`pNDM=#o# z;bFVV za1lFcBtT=0P2fqO)$BMx%@-^k=80B)7{;ySi!nRLn@c>r;I@Xj``a^l-|0rH^URP( z&oo(EZ>gLyi@inqH>QnEddJsL3$O8NhB*G&PVf(6KT5cDoJ8spzQIZZA-fw*(F%gc zJ9EYF0qgsf-*RO>@J24 z%2v~%@ut5?idlT|>5bQ$%5arO&+9g)c0&Rd6G%)o<^2)QA`{qD19HYTgUHq~WAk}2 z1hFV8o(n;ZR;ziNn0{RA1l>}IZg-_8`90;zwbHW5gR-YpcwqT`Qlq&1Ys?l@n*ly0`^xaNue5Y)Y`1{RwI5 ztz7@SG|&0AX#1sObpeIUp=h74!ySUe-Z?-Md*$#@xkGb>HE=8b30I1jPq;V`X@;nR zAP(ySxztd_;hR?6hryfvloKfwB%KdE#3IuT51;~!;tJ5Ev3YIv?Ke4hPZShueH}$! 
zeA~XorJdageGm2%@s|YYE1zHILBKvTbjE`_8UwL@((n)`*GN44UF)=O)e%dNtt)wQ zc0G=BgV*jqMQ40Bfo|Gpc0=Lx)UaXLT>k}y&)Z7uH`7=p28B)4oKhux@75PD#KtjN z*6`|0v;^mA)42&H5u^CQfq%eOR$X%rIyz)$nz#Zy-<$K0RBfuJVT$6JtN1-%j~H)v zI2>lTZSyZRcn+c2g^f~fESDE}VpC@VIK_znsP*ov%X(1zCW zV8zk`E66xvwV!k5mrLy%I$CP*QLA*Ek+$9Bi%~C*wDj_4U?wikh7HIJZkb%;&@t82 zJTl5AlYUs1W{Ng*5J%=qEsD|CwsLcGk?ei+J$mm4B;NN_2!9 zaK=VFxF0^7VDJD8(w4@|lQ=pf?TG)D*2u`7$l$eiVyjLL#IeeAJTsj!IPo`#AA2>@ zC8;H`xW6x(QonUG17&9p=Thmg_0IHW4VEB{342Dt>As{2G{ujE! z!om=@ga8ZnXv^8|EYqK{^%Qdx87*j8fS*t6v+7qWa0-vBY<~#g<24FcIZx*pM{*3P zZ?ARuh5XT~Hp`Hfr+Iy&q&_}+QOG1^4|!9D-p(;MFcco1tAZf?gM*}iYcpEqBhbxf z8y_Ff0CFr&ZtjiEO|tLZ=^;RJ*A}K$0%Q!<-_tr)hK3NWt*u2`-_P4wSXeaV8+u#f zVvD5R-vdrtR*SXaK!1asV!$6_ zVq?33zHGho0R$9i;JyK18@{^f0h+e&?&6RAl<7=>qS?9N-_tXoHcq9ox!C&_Hxi#i zHmd61Q)VC!qf&W&Ut+%Qm?C}ZU*-Qh>;LeO!O2RCbRy{Y>8TweB4R9&8x|H8U;?E> zE#M$bl%oFi*6qJN=;nA?3CbEpuHE2ZT&P@-`S=L5=Vkx>_a79ZFJY$)s9pp>1ymAA zqyAifRnP0}_cyS{kRULJd|L&ilhAa?88C;SQE3$Hkj&+Bs1AI4uFVRn0AxbRCYPgp z<$`F?0)!6S@TjsFih9=gC!I@JSh(D39#mPWtmGd@xgr65Pii#{BzvFSW9jeKLm%N^ zf0s}0+wTPg1U?`W1Oaa3m5+t5nVAyYhYuTjd%*<-)Igv9l}!mu<+1?%@tTeSb|Bw& z7w4v@r{C%V@^rfklYVO7*B!^o{%T*vSY1YAXt~K%&4iw25Jy{>;K}`d0pf=VJ>8pRM*eq7zks-_hn|0<^E#Z;)eyKQf z)Fj?o`S>9l7JoQUOczH->+-q(A^+t8@?}xmUFPZqGu!xXRe(FUc|%99ob2WJ`4>tA z@Y??3-p767QXf#dc1q|R)tzW2bjjBnTS25ii8LVC?TzsAcHhWN_U5} zbO^%Gje;=5(B)7=cMTvNN(>D$G$P#&-8uL0-TS+D-L>w2cmAC-vu56P-skN7?EUUX zeir!{dA_ut8j+9+i47cPhSoM**HKKkh8Ya(IzeJ;9~e0plQb zj~AzZlil~^Iy*_8p-n7%-p zzR_DD%}lf%4r_dnP|!`fouf3tJbO$_-CZ(7rSL~E+$t8wFcI(LD5 z*db%2ct^WZME)=H@3kVpYCfmi!TC|olKp)w%%_7v$rkb=uVa|v6uxCJmm6+>cmu{M z9NA+q%M#k*b9;lR&NE$TJ5I`lKbw@@DIS~5jAPenT#vKk7d)ZsyevIL>FDApf0)fH zpNl7l|5+R#wV-Z&v9_Q$MZ(zUwb&w6R6*=*IS96}T@=(*6KH^63M&p{)ruGTTF%qrq$j&-;SH?op#}VgeQ@O^i+irCPmiBW2 z9%y+cb7i`wDa_NzSHg{1!)_yk$=nl*b`BZ?6d;6kIv+Mcz%rdEOT0^-JdK=21xL5c z)V;m0or-*+9U+W<5c&Fx_0(@fcvefH!K;?CJXMAQ1_C?K@N!XA5J*s1_`^qw$X1Py z0k+mdMcwRSC$1@2ls}`JN5*jm%;OAk6PO(|`rea#nhdqQUrxX^`AOE*g0tgHWc_Ej zmGS{v7P9WX_2GleV%g1gwRZ*M`UgK!e3xmFQ#H7PSMOxUS;_ivv>n!k@DqP|lO~nf zJhN#}=a=SVvdgl<7G^n77Mq&q2jbm=lbcj6V?i#x#GGy6~&1$sH_xpp&`0#kjn$spE|DdI(;cJPJ?rBa42sx(p`Kj2$jVZmFm$^RT+|~VQ9`l(^Ftd zXCy^lePsTnVQKZg+I_#!8re3221J~?nxWfO%u5C)*y=M4EQ1-diGi5jq&`L_V2XUf zjivb4G&{4$C!W^BlozbzGu%C<{RKl+n-=dzK#rrf25^33Uv??q)U=ZhU-Q?#5%c!F zVfGQ+F|mlnPA%!jzM&s^-Z+SE1UB~}5v^NU0$;!S3q;mK<>8+*2i(?GY2-1Z8jQ3} zrBB5bsJ`6ND_B3|2E9r*)myA^9QrKe%R0j)<{OuILrD4aaBTbjZZ(i3Q{ZoeYf_y$ zX#5wpve!TT@QT$d^VGgtf0K5wmy*&+S*#}camR=&1-narh+!I@t3a0SK*celx}(2| z#ULsT)lk74%CCEz(8V_J9nN@8i+$sKNF(EbGN0TNHxzNz)zcchm1(_>Cr3SG`B!;~ zI7&)eD-r*$EOgY&1?Mqy((`|Mwv|%p>Vd&Vao03n;SeqDrqA_ne_cb1xgO<14=ZUH zb{4sY7ud9;H0#*DOnZa7db)W6kIX_hek$Z-U~}Q8QhQn}uY0h6Y;PSR7w_^$ zkK;s`S}~gO#|-S=?ry{J-bh#eRYQ*0nhU;V%e$?do-zF9_uTWud#}RLjoQxo4Flod zN3jGIdMFly27bZHK(@9V2BLno!2e@o4B>90=Mdfa!Ar#(SQ5FFM_{B4rzI!%&7By8 z{#bJ{UB(}zvkgPoUqcAgb}@%}nDG{ILqrsN!IJ$gXNg{g@{l!XP=ON_ecs2g+|ryq zYD6?eyrhhm*4q_M`t1!Dw5reSwNZNt`b9LEpV7b9qcRb(L~njZo?2T3H9h?$EJ!1) zB&-a5i}IAFP%>4OIfa^%otsbB@u}9F=59Y=b&Vn*^&LZ$^2GT9r!O3_tUU;o7XmUD zPE|XN^dQlnz|+m-t}L3MP^Jr0YU&7p!D-9yW*YV6e6>8y zO!$5g#D1muEu}W@Wn)N2_)#U=X03%$lW6Vfmicuo+HoKZPt8otW0Z_|yMOMQ9c{>$ z790CSHiouue@RQf?kcD@CF07aKB2{)skvrtbcyk>BDOJr{~hVFzUOAdHtSi6Hae06v*v&O?C9tUJ zraZdSGFUh{V@7l3#;VMu0O?}j_|FX>W4u)RNlQj13phi8h<5@Yf6EH^96%UCfB?5c z^bF78(UDBp6Na2|$dA8#l#Q+ z>Qyd~SOq8m#$Uq%*-LavWdS`X30`TyDcRL54ZJ-%HrBbY00)qM@sQ4`w+&#!*rU2w zz6Hn=D-0^JkN7uDT74_x0a-3< z{SnYLIOcMyU0z;Z{ohK~-7`3;sEE~myy&Orf`3np_^rIAX1q_7#6Fn8@|xTkNU5GWCSR6zjN-+>~}3>5SfO^TMu;6WAAey6%19R@$#+P<~|@?)oKi91B8=JD?0=N 
zVYi6DT>h=xu2EH=C*}@_0W`<4_P{_Oki2l**T>r%i1F$HY{@hAPVg7D#O*NvHLCHy z@;F7$9QB{P&}M*30r_lQJw5E5(fu3mfw8(^zpnv^@FtD@Zlai9Y zb>HqfTpO6k4>}CJyG~H>Jq+6qJ&njARNMa<2iNF5(wr>Tf-p#h7AZYkBLD*4B_t$1 zAMV?s@)YKpJ=Kiap7>GhF0_w8DS3+YYYGn2CDp}%A4a6jODW!(q2e*DvnzJ%Kec+@ zOfKlYiIwt6e7T%iG!P8`xB-fDy%9JZm#JJmAQ*-V_jIQbO*{-~X$TBSpQ8@&U%PjB zz5}=}X+?f~3C0mBl734GgIQWz$KF;en*;`-+0RTcV*Lz~$Y+30|4c$A?jXT1rNPZk z=%=yo@-k`XcCcKZpg*j+y7h23o~vHGHF(3>c0{pYIK2;0-Mr!}XZ4n@?>qcXHK%F} zPTPKwxyxKuDex}tcVhu4$bsbYH+Qe=J-Md~dlq#N_D-p>2Bu0C%3!U+P~K5nnZJ{c zw)6Wo_BTe6mnw#e1Ld|8WyB|g_hB`16(mg^sj;J2zJul)Cq|&mX4ddP0fd;;foG)I z207(q!2W!qsKL-|R%5+GI zd1%d!mn%y_wj2Zlf(<=)j}j9GYpP#5qu;)oN@_qv?K6-8Y%W^1#+uiug5kzNe&;z~ zu}ZwvYt1#30W_>}G3_rN$h?*3HxX&lw3X zzVW-@hqOo8P^&4M1G7c@WtI*Loactnl#OWZ#>%=U+!YkKl(-Uc^!d*~#2Kn3Lc5~l+zQ0|f5svH&IIYFxaPy_{=IW0X@ajmCv>emk zYK-B+yKTJPyg9N7cg`D4UZ(QeDe^=m^HySBCYAd2iMLvkriWU`B^ zRdRjA$0n@>m7?|1Q-NTOh%dmN&C0Pvs_m`2EE$oJ6LEI5+sN7LmcI*Rp{lKQzbN+b ze?Czr$y47*EpOQ03sWc)4*a}&4}deNtBQJtp6k1T`U>|hd|{{&x#EJ@zrpyu3a3cp z%Xt#DQvAc+*sB3>G+eNA0s@iWKd=m2coCgaCe_fCyfh#17&?^bV*D(|$&_W|nwBKU z@y%c1=&1Ti1g8Uc0&;h`w-}50;ahIAZVixeV?_xj*$cqCDI=z<+m$ggd?8uN^p@AO zMP9H2G30YEZtFg9rm4y2ll?7eN5P@oTEw~RYo?Q9QQAK(io;1N%C%K2&gLbWM7|bB zSS>#MS?_{nHF@*Qq_!{wX*r`MOdPyulTC?aq>CJ`` z$iKR+0(s(((WGkVj}Qo8B(M#qpQL+9YHQ*4V@V?SzC6;hDO*>wirA~x3ZD^C+%@oX z9}OYzuIFgXO*Owt@hXo9iv@Q}^qz~Vj{==U(|@)G&`d;0_coZ{Ex_H=iTR%r28%ry zTWD~IU&&Nrneq+4}!^BWIX>J*-0)@Nf^>0OuX$%>sqcqtgVC$o!R18k`z6DxG4pL-(1@rJo(4?tu_MxXKs@sOJ zxw53Lv34*RlxmDC|F%5&QX%_IB#Jh@+|Ej#$ec9PBU zPeqNDr1;y9?@2RF&haXo(juZfv|blbtIz*-R6ssZu0=fO4OPf2Kfg?4(=OG`*w$em z`|&FSw&jxBFyN3O6gCX^ms+!57)1`Ib(to1`oH-KWyij|u~3Nz851UMWm7S~%Jo_M zmTOv}Jod^p0mKgN^z3VRzh#fJG(M8A)4WDMNU8Va4JGhhZbw(;neko3G+_R!e46)v zEN3%XnZY-YhjXD~;C^9cU1r~yCiI=CU^w?ZSdpWK=g4`R>vPr&7u4l%m@#MlKbk*V`lXv+R~;(~Ku`!Y0xw&C`}vEL2=eA>JX1E-=p}ACvw4k91uN z@6pRwTJDxvS8tNxKv;USfR1sk(~em(gdkcqc>jREvv)kEOACcKid;XMn&F?IZ6&-3 zAAX&95S0-p__w|j#BEF&r<#n2r&QCkLi$=5A$HbP&EHw4!-p=_dEDH0!bJ@Z`NXLI z*&)S!=e>`8>;$s6i7?BBbRSvA<}iOD&##YN9osOMdv5q91F#l94~SdgyGnXp8csCj znOBeeF%|3$uS`Yr5c6=;1HRkIj8UqQ;_5%_kyU$^?0tg9#muaUh^EvRn15+qep?n{ zP8#q$7%TmVBV_ih30>}hbU8{!(CNdAOldcSm zTwD>o6_le%l)j>BE)Z7a_Ar-~H47OY@=Q8Vs;Kdd#wl;R#(@Ve5Y~Y0`X8dD{=o`~ z5zxIdq*4tb8EIYIiG;Ckq7pvBVT!VO`lJho0ySOR0Hxc(eOH;yCAFG(jARq>8PCbx zyYJDL)A${xK5*)HCg7+5g-mdJNXz|B$CLRBbxmGb+j$4HxWGh8i&=i>ICg`EEfIXp zuHM($sGS2WY)Z-x#7%@3zov9AKV+2-Lyyi7g0DhWcZ5WSeUXs~3RuVdU9a7ESWx7P z*%LHr3Jk>(gW1qbVVjGT7K8e$M|~fJe#<@I6;x%olR!cZgIGJ{H~m zi}VDRPDkKOXs&FOX3C>G3&!3T;1HhqhYu31J|}|Ci=XeX`y2G1<`h8JGIMZHwk|Mg z=U|Ni01At0rbu`kFmmJL6f-Zcwq95r^4Y zJW)|mCapU%N%Pk2M@PqfAS!rnZVoWojRAZD;1E(uN=n|c3?l(Km6am^6`8PsXNkN7 zn9kw;l9b2pTwjh%WLR{xE^T$#lLH$mF$fvRs3=|QP)hh+w@5x&aJpO XOM^dG#3IALfR}=ds&s|qhoJueM46!F literal 0 HcmV?d00001 diff --git a/example/deep_learning_framework/Principle.png b/example/deep_learning_framework/Principle.png new file mode 100644 index 0000000000000000000000000000000000000000..0ef12d2b71f31949258eed17f55dd852f8307586 GIT binary patch literal 25277 zcmeFZ2UL^W(=QwWDIy>$MQI8uD!oXrf~fQ+y+k?!(xikEK$Ic~D$+|3r1#zlND~Nz z-a;=yy7UsteFC2Me~#yz^}XM^>nrQt%UZ0gC)v*~vu9?{%x`7~Dl0xFxp4gg2m~T| z`b1h41i}dh{$mL7fp3bd36+75bB?NyAA)i_X%~P8Jab6}Nf4+Yl=$%ZdElAI?uoV| z2t?X|{Xf@Yn{EOE8L2*%mQ;5&Sg8r>rF#~?>wf%RzGDd?tkxG-W{}mHaqfybm{$@a z(?Wc~;mT}MU|Unm4V>qAB(E>|wz1LRBuP}?#<@(vLAK6)`34Us2jWKnZfFDf{B-n} z7j{p=Z5Qi=j&!Ts*J=;lww+eS#xR@r#3>-oyNj#+!tm{m#r91vYG7s{5CNVH_QUQW z7!Ui&{J+j)Pe$Edj~qudbAs!)QLmFDbSdg3PuZvgF(V`6G&sDPsp9cQ=xkzcPhj2N zbPz{O?90m5>h<@wyj}-sUhe_|0?O@VNk56)sz|p&h;oYUP5PpTj6>sJ4G52KI~VQu 
zkVUhU89vWFUS_^TPEJmcHA~kj+5+3#c}n?7@78|09D_L3QFhg1r!kMcwpOZb?_ zpqQ2C4lx{#M8Vu0*WWgIhI;C*uu4lyH$Hl{p>p+ll^w(l?XwO)UV`7*W#ujcrXG;a z0(AUoHWeaH$g^2@SLx(?eftBq97k-=_SUbIl4sK(+`V;o*QnkZ#5drwW8Zmso8#$A zq1Eyk^g%*%G`h)M3 z4~;GoxAPOkREC~E@iOlUDP?73FT|6Pru~Y^6>iSfE=rASp}&HcdV@8bP&Ryo#AD2< z_0qh~$+CMGs%?9mr*FH7+h#e;+cmrPxGzy@l>{fHP>hj1;k1iqzGIEXNIU$;FdzF4 z0_Qss9h*T6$^#@$1kD790W$UF$k)dbjZGiw0s?%Cox#Oz%R0g<8W)1CUfWUXYPh5c z>gu~#L%|on8BJvyC7A9Illx;AM_|bpMQ3clyt~lkgr9DV&@eU2QLo}E8`r(f8BzXq zxtx{_odXkQfmr8<4z_v*Pt^pspqD2K7-lwjr*Y)K(v6ZaBxaBz6EeK#nI2E>PvpLI z^CSviKXEpUQ0%xYd-r`FO*+2J0~3570SYr_v16{yO2o$Su&T5kEm3fQz>_TeDo;Tk zmj%#(Gx+mxtBHVPapVPuVW*P1bi;S-ajoi7#wEs&8mFFMhtgpl%80a4p-}yct%G)Yc^aMna!^?_vzl>1oea=Y?%F*qpR0kLva$N)@)D^?mYRSd*7+ zg}O|4%O3tv`*tTXg`7$ahl!E2$E{wm$|F{f1C_heuw2KRT~IhMpyAUgkqyACq*^1v`nAJ0nLa8+nG4a?vu@85SuY05f^xTti#9I6 zWS5wxm2Yt}PTyG31YZ=BGdxznD^j-$5@Q0s3qJiJhl+Yh?~}9kO-{3K+RKHn?#n9< zL_Y@4e&U^eD0{kQ4o*sBdd7;_vG;J=bz5CSap;1eXw@~(h(pM-z#?9=>d~b((cl2; zv(_w}5w7PIYHkSiQ&4ZW_hek6nSgz(OV>9-vWKxKJ!LYwd%Ek};Ixir(}wUt;HQXM z_G=@TF=~vQ5k&QsI$n2`Y?-DXo)P@PkR^}($PpBZo9v)1v|bgjhPr=+x}<%1(cxZ6 zVk<6305QvHhYR3c*GuW2qnoOSc_(~SYHT^VMR~ODbH2c^(qCcCGBk)V42v2gFry-; z(mtiQ^%L1zu&|(Gv8HhG;S53Mia1YDz+J2`e~2JLa^O<{PMS`Iiuj6(#QmnY!j)+9h{ zFkki0Z^ULZ&F^QrTziBO$m12R2Ss}y>a-qKT%>#3lm>$eSrcI!meLBn`fiXKq35tp zL6uq2JOQM6rMh?uXoZuHRH<$1T z=);@cZg4^?mO*(_4t1HDuFah?S#$oUH!<+@tMiacW-B8l=>9yhp2ID}qlxw> z;8%A!<|0hz`UQaoAbmzR5PE6iq%g*Kyt$92b92$BFB-Lk(D9``G3a(U{muCcV3UDByqP93sX)`q)W8Z7vo2AKo|;15 zHzmMZgg)dz3qO!gH`A$IM2&+d6M6~T5KQ=gv!S;^QRVeR+XshaAT1NZVEUV!IIN5_ zAD&I*Uklo)Xt2wfvWI@jGPrB=D|lfGwGLjS{A=r(q|>7Xmw@I6l!M-52)Vn zHywnPu9%n#mKaPrWIQ+rPCT29mIx&eXKqY*_{VEGoFqh?>VLfMOBkHO{*>UReX(_f z(Mnnw8K}Xh?gu#zQ{8kyB{z3_d%O0PbMkuJaw87y$v57;wK2sp92YX~H%tH+Jjy;aN+BIeM_$|FbA z_4I7&5_mo;C>Sl|Et$mp4Nf;;BoH|<19i!EL=u5>q2wn7VHex4meRsL-KM3Y96nr$ z2cp3vg&F%TuM!azUr9ilXKMvXU%bZ|at~Ow>|04F-A#$eo4QG#a-a3d+cPK)p7dtetObbqa!5XTtN+S~oI z7ruB2geExSnEIHZ1ofPyA!lfR}&_GOs;m zy+R!>*I}==rh}M>ywS-1{{2aC-G7MFcnQ}y-)9-|+yp&0nzia3{Q>y#!7s`+-oQ=H zFU}3l7NXwzhW<>`#o?vm{+|DQw44^8o7AxtP>zGNNCw`wMVVss~^f*{a?-MtmG2VPW*6uRTmL$ z9!II+Y(acVrXxvqsdhw?hQ}rH0>k5Sv-}7s|D__pI5bKxp^6d1+Yvl{k*+hK+i#@?_;2jAgPV!h)<@Vk(OT0Q(A#@q_0!G?Wd19W%~k#@9l;VB;X(YJnT?n;FW8s z;mY3W!aJLk1k~7D!vdJ`$xD%WME;1>mD7#h!Ci34`3}tHq~rnpbt$ZP7cdMNC0)G+ z3;{1~mNNd;OjrIhqi;aK(cplq#l^F&0S5)DnqYezxlRTM6CmG1WY%0N?=Aw9H`JZ^ z!u4xSYX@%$#~d)y7L^0dc9xmi9E?A&jkOuX2+KI=h{`=T8z zJ~tZC^V>L|G>CNqOeSVMFhu~CGk*w;mF%#o(EeVd4eOsq<7oF z3TjR_jek}ky*}H&l(YP8)Tdc^IW=a*H%piO+J&|_yl{*mYV+MA4U*ByQ{8HsT zind-dFh^b4-`4O{7OW>gre_v0?MYEtoMT=TTtiIZ-qBXtj@a-^cF%{;;Tq4X{&dML zapR1t`>`8``YT=JRdHsLDk>H5^Rx^fIWKxdTJmLTQJv|vULSBYB8P;{ z(;qk{ivPNa(O=mJWFS#SDAzkimJQuzPt9(XzDHzg-0f9*IAS_`xzqJXrPG%>tBQjs zIsa(EVZY@gRGGO3nH5MN(Swp7KZFUAPPEVzi+@6_j^~k@=R3R6j{ScRm?x~`65~2va9a!+N}C8G<$8C&*Y@URBFis z$sKWIaCf(%vQ}zSsEjLYn@LA(e^;c8WMaO0dVT0OiuIJ@7-!NoAw(SlB_+-7d|EMI z%TU8lv(Uabx(;i?P2M!zU*J>XX9+{%T-n7m4Q_i@*Mnfx)~W+>WBS9NZJ5@Jw$@p4 z@o{xlZ@2axJd4QjK)n5u^wd3fQ*C9G8SWLM1EX4YpH#hcr*BPS4z3wPvG@?3Fm%0T z1~clnG&?{>G`9{j4H?`O%ySgMYc4(2iDzss-=lu>;)UK>!{KXl=#|;RUd9M(bCIyC zx&txy4>Noe2A&?e+?SZ6aPN1mM_x=+iS=p2y%C6Xv+{>Lp#LJdKXWbd( z>}54GN){EptQD!lP1l=Bq{Z~1{3Uux3=ZZZa#0riwJ#lDB|fb&!zbVgU(cXgTn9_9 zTajIv>{+aai&2iD%tzYsI}iKUjQbrKXkGG`Wf`UMP~pdf(1eP(=<(1Y7kzgAP}M2M z{TlTFs?B4AIh_q*@)%N)ijyCOu%=S+JV)IOXaGGD5cUiKcPY~%EA@_P6ep=*2>^pcM=grn?7b7VVZnMAbUC+%Q`wv7cR)ki}57t5C=NA)&ktu3NS@D>_XAu-u z^o~Zb8DD#vmAQCW)U!@ca7A6zVpZJcl6BUieR>2c^t%bz8l%T+lZPnyEHXZTgmfJL zolLihQyy${Y)ji;90~M_F!zZ6_TlpI!}>w?l}6|tR0x@yv*GlTN$J9pt6Gc7*G&%v 
z1ya3f*Sgs2C#;+geufJZd_0>=tFlV^h(Y~s!D0m&D+RS$y;$B_CSLR5jNS002lKIg z|6500wAQ-_BC%bEPOBZI^qloB>a$a&jxtQkzMD-=o@o=uc;^#xZ<~C|Qv?#J*G6ic zg5RAq^JF#I+DZq}f~!|u-kvRg2oJ-cH(h&ip9l@5yh_OXzI7bI^`JcbBKN@kb{&8& z3(8-L$u(^_)(-nbc72+rBw2TNyN5M~;hKu=EB;ZVkz0jy0%h-){E?0ZMgOo}`#v|- z1d^wt?tULm8WP}4-Q{}rIp;S1Tj}-jSHoLcNv1P4c;d0EE{hJugEKh}VzM!-v7hd6 zm_`T?T*+c(84y7ieo4}B7o1O9tsu*$pmsc9FNojTE!vu3iM)+b-hX+!Y!Dzvl2UIA zx$WSRVBWC!m)}mwuwvQzHlpB)CjD1emwIq@cMSJBlh|IgkWVBWT8SQ>^n$NWXzo*a z7F)mYUiYZ(MQtK$^~r`g@}weOdld`DxoHrTVwNqGe12#t+xNVLswG1-*O_m__Q6t z*@pB@GPfO4tSRe;YKTf`tF_BcMbI=p-pH4Aqe*h3%`h&h#7{1W&Y!`!h8xEp`vuhf zh>D-T&+UoGJ(qDKDlJs^V#wGrnjCWJ2H{lL4whnq=tiLoPn%JL&ixGc)kik4*|(b; ziBcYdDQmM$TghOW%RQf9DY)|B=$X6fm?~9pS7hMKdutE&5%ID@#f5_@Ox&Pgg7RO^ z+6(wpL#b1$+-!3k{H+-mtc zI|4np_HGr3P18dwLPKky~AMp*1+gSaB&Eg~L?OH&+ofE0LN(-5+Jipa|8)VPA^G#@T5%8)u;l?a~_B8uIKDhf%1P9Ckek z8`>WWJXOS%1c%o?wY#7e3bgu7Gqp-YO^4Of)z&IX+8-_5hfxS>myJwN4E}!p0+yVj zg2CkhbKa`I2tB}3HyGI?HfYplxuI^(N)_@s`Wmb0f0-uMTrAg0T~!(V;P&0r$jglC z(O>o$YZL0R1~E+Ai#_R6`HSAY`v{-hzMB?qt(PDjp*baAvL?;yaVaJCFB|mVw=;oY zjMv}#C&%RdS&f~p=>H2N_rfPau$a}+ z{GO&`q{PaGPT=roLq|UY%KCWMk)W@VO9{(7-zy?(Z*jUM%FbV=p7+PPH1*lywI`&T zi%f&VVkjWW2U#v?LCA5`T-~_^V!cj4M$cR2yuwYxXPxgfRqR4%o8YzR|Kcbr@^>kD zYHFs!E>36PNRDPP+Y{+#r<7g`d6korxA>N6aW zMz5|Nv>P5f3;`iAHBveu6Am@G`0~Rf$9^4dvfmj$tqqsGM~U0F4#5bTUH?YRF@%Xp zF*qCx-AUgz8Teho6yF2EKX)KJsJA>^)G`p%F`tk#5T^YXqZ9Dt1T@Br?t0!T!WWsu z0X?b)5V@U&1*X9q9*$#&<6Z9GdkqPXbdY0c%>w4zc1aXebfW8$>0}!5_hu85jGni! z;laBN139`jW94g|OZ}DHa=$OCr7aH=CiW?E8wd1x3LR@(>LdkypQxc}mhqd6PipOt z6?pKUOT5s%$Gk0-#ZuG<_?hzI^1bRf$x=8oho_FIKm-t}dKTC$jx%GI{>d^%7X0c7 zIF149h2ggVp~l4f-FuJ0w4~3t71W4=Nr{3t8z=!*o7BQ#0;%7=enJ@B@I^M&^1e^^ zO&@#csW(k42gdWxl%8Smu@_Kp0+LaI$09FTqmw=pgp9i`_wm)yZ23@(6Dp# z3VPKg-=TXk_t=gLR;$C+ZZ#0~z$yY^kvfn2=AQ>$_Q0LVyvEj=$WU2*`B)@$VM8wZ z^JD>AN6K7VeI_Xo&RpPQp;b$elS_^}a^Ch2y6j2urZNRQfm&h5nGyLH!a3jP#INM} z$kONff`!npy`3x4=$s-kOP1t^kEXA|10pS7L+=r0Ofd`XV`|igLbAm5$}ul23l;3| z2{3oKo5~|`jk6}ghOfN$!fT#VzTL|kUiafeoq+gh$RT6}aL(Voj&cM!y}hnvV`*vP z#7pvULA|n2&j@upu#`!Xbrg1RQ=qP7;?WXX)ku^kaI&5~dKEikWiYnwUpT`w*)ND^!csU)jF$tgdF^ArU)EiHaFw5?p> zdr0qNpYShxRCR^g_c_mXX-6qlh#A)1Ze*&M*d506~tc%KG&TcBepqxfffaP%>ALpeE+s&O6oL83E~^McGFck)DB7pm17a+)H9LE%q9;_VqPDSM&Vt$ zy>~SyOU=G!$n*31(`HM~cLX=1wPTM(hCQI*?D2tO_==D-K8Jw$wCr-N5?9)U37SJJ zIZBJeK5-7t>=a_G@o!^OQRBYoc)kCEg_7PRpMg!u*PC+mf|Ndy0|$+gZzlB7pp*Q$ zf_#n2lM5M@2@V&}N1~YP@7Z?=`V=VY3f^>nxa`Qr+2$hEw5@;lAfazDY?%CCSkDA} zv5SMb>RTYJR6UY55?7ZnIE*u~m`jjSbXY#r5^}Xu=h5!$+oieHSb(e5^*Ly$i5rl& zM4C8;7qNro9*1f@+858jo5+XXw1|*I? 
zMwF;>qj>enLz#`^a@O-lk#bGj+>tuQ7(ELS+6r9bg5fov>s?ZMKK2g;%x8>cM`XaP z8kX+(|FUJmbksArX>=X>?a0iWg^SUZ@qRw`4hu!W(Kc7NeS#{NG;}%KE%{5Kb_RXJ z#ewTxy1WhPISl+StSS|A>I8`Pdi_oB^wr+dFHt;ma=v|$|9+NLUpi3id&+7h#em$F*Itn~cjTKQ3+yh%@W3_tqx%VymQ@}r<18k{Ql z8ALe;EVEqWt2gP$_NGFO8sS#YaE)vJeM7|Xnj5|8V)UWSHgkxGa57IzBTPz4=v6oB zdnkhIPJ>Q)%w(^Fr#VUoaFnw-HcQ#o9_T&w_)5^MVmr^daEn=h7+tjw|e5I}a&tDUxRWuU%}9vUNO4U?U1ikoBElJu)wQXFLI zsqN*~W9i@p1<8dGI5)1_r*gpO@tVCCZBhobV{)&s!D-))t53eqGxxHA>DwOi>~DL| zY?ml&9g1c+(iSoP3&+RSQGcM0<7r1G=L{t>C_z}dz3HQQg031DijQ2>BMqwqj<~Td z=01Iw$^`VR3xmI0)pz$1t=B3$P7?b*D(M3QQT=;z!%V(Mk=oU*`++d?pyRA_HVldn zJv;X~)=$U2@y}xmPt*xmC;6)>HDb`ps#k2@F}Yfs*RI4ag`HLR5#6jJL zI|yW)06nmD-0FUe5-M0rK=lc6lkq1wnw9(ifx}Gpg-w`3Yfr>Vx+eQhjz-Tpj#Pz| zO{hUY=(kh`gzlF>lyuSBKtkCH43#kSVuk zU0(Lbbil~fu^opj4QtM&dan^%cK8oHj3>q1f@Ot#jnk3_rqE!Ylxf;796rC1(Db8U zFln@CZ*tHj^MP?fLfN{!Fovk(qa(+BOx+Kk`|)ysk-TxK#gZA8PDz8$up;jDPZi^j)y3HvnK^UWx&k9mkkj`u|{j zGcIsUyx>x>ODq7x>9I)ze!2o2Q}R)8I)DjL1~1{y{?T>ktjl(rvJgZHLq%P1so`_z zaaRqRU))ofO*tSyfFXrlyyQg85d#ojjt^$VUbDxz3FI}teMUQS28F>P=xB39r>^Y- zUG|FataQ6F7!=Md?aiHj&DxD{bbbSAk=F|t)6S!e7S%|O{U2}9fPcfS1PG2+MLpH& z1kNSOSx?lC0a;SDc1VIwyXS)C%_t*%7@p|p~ zds&!M_O>DO0G(*}6y)u32C3IoV|l$}?Pz>0!$$>53R1@ZwK?aDwPnnx0H^fVcCs`! z7~KL%XrH|&p;^9|nRTZqcq8qQ|3BYGf}P1hi@y>wDy4bbzb`X_)X!jM`2HM+m!bde95=3#ajrJVk)v z%fOlLxQ^aLSYRm73=B0BhZyn0x+pJqS|HOisc+89V-~u(Pm{~baP+eReMQrQHG6XM z7yNCq>EFpSs1FI$Nd#mo7CMu=)WP+b!Swi`&L=-cD)tQ1r1B@F87wYNds12!W6t0u!6pJovG`nq4E-C+vuutd z@I%>SY{bh@d4}U+mMHIOriZb(z8h ztt&)ESl2X0?$S_8ZS!rgUI-qx(X@g;zD!;46~4dVYqn6%@AseV0c(+Y(HCFp&u`S3 z%4z{`DgZf0Cx-hXH0r8fY#L0bQl3{-=ai_J;&;(eYw|UT5$lccO?u&xQy07j6!e#~ zXdCD@&0uB7~(dkFbu>z((Eo;L>>dH-pI?J^0a*HS%5p zGW5rRq7p?fe4|A!@g_jxNBuL{naL*K@MP6HH=_tq9YxE~@C(7c1d%3~nPH>ob-JGj zmdxK{P3(RP+6_t9#9by?NP`!=M_4&2&kq%I%f*bIyjjSP8yTFtVck^5KfL>T?h~wO z1U6&>F9e(#!ub8&WYR6k8;Jhj@)hjJ<=^J37CB~ItC5bnM;iPLcd^eJ^~{`wC$i5s zzvMHi^SY;^KZ-;UCC4_;hid8AVWFT7_blKE4%;CN4odxMedf%mnzo!noe{6h6!0<-g>&o==J7}S9HOH;0xhu{L5G@Y5{2m&lYS?1cUuD%vcZaa`-5$)d zRYYavryBYx5b!_<8>dEbk_$QP<<85fK-Zj)!+I)Oe0;GX!9S=pMI@qZlV}c8THn-Ft0SbiH}YmfO|f!M}jnKpKtS)e^ZVNy4Z4m3?N} zg+(7XiGrh%+^{q$(?Gekq}{rqW+(-ojMXCT6>6&A6He+MsS@cu$|U=aVBy3!GZR;< zXQNoWWzl-{IgCJlwk+vM`nxh=-?||W_$MC~oF98PsrKLVxaVxdS~`@4`u?7&V^{xq z_dHsR^`rV~N-u+6v@!T1LhNQ-TtRQbjZT53I_e>7fTy_%44OX`MF~*%)p)+_^tc&tj(>1_=PKi)k_|V^sQW<(+Zai3Q%X06{f!Qt9ilcssQD=_`x_lEz^=NVDgHcqk*rqr&xZEr$a5fh$YMqsB7+&;ez*#xI&T{r z{H_7+gtvEb7ZHL!v|LdVdEO~B!&~Bqko(;y`|5yXjQnygF@R3kISQ2vm(5|??D}0^ z7NsVtd#$VPV2hMw?G_$$@FGPjHFKDY27YgudFm111Mz)};)dnJWsQ;qgwF3{W^5!p zf;f?aB(BYt2{TG)>L`(@_ts8CMYDZ+RUyM+YX~jX8=o@RC zX3TZO+~(P+jDJ3PuqrK&G__m(1|#uxu4Mt1rLagP|K|igU|s4#&vcRKhS>M0b$ebQu?J=pEvEe9-(Q>Dy zrrCzj=}p9YhtIV_+K4|N`PC7g*}^-xr+^7S#C}G;pE8mN!5@)8ucayP1G17WUFFhx z?y~Y;46cxD@JqHs`K)vpYzuJmEp7@4bgC^^eRL;vC-WgM{B5H$A9+mDz4iTImB>c3 zF=8!e=;hGLMW^aP(EMTHHnJ3l-lsqWGJkKep>9gwKSUDzhV{37d-3tQD|6(=27c0o zn|U3jymwYO2pY`mUhH`{+^d@Sp>kbaZL^+4${k3*CAlZrwO7e#LmPJV)AV#hX`!8BAYWr5q84+!o1Q zXY4(`HWBgRSY)qN_Gf6Ssiph9JxBFbD5?h|{ZG@QbJdK{_swNd-nU^@X0oBhxSu>Lc`JsoeuEDHi}f+Fqh+{GE@8*={`YRq4Mm4*&q% z7HOy)9#r@jaFwOeZhJ~Y9WSVnz$02R=2)bHNEA=qmf6Dca>8q;W!7hQhd{u8TuFr} z*x``7iUOu0pG_s+b-~kMUSQ*CLYgH?Qj66hc^oQ_e2=(3BJK+fpP@bP)hurK>F5)f zZX5EGE*f866j8dZpCPOH!s_VYy!~&2*w4s(W6nU)`TTuDn^YRN*cumagw)+-enmE9aL+0l?TM9rBo}Y zD7%ZN&TEr-79dN+?Bc_8{&_DLK5VtOno1eC+hnwPK8L711=Pdaa~78`Pfsz?;4BvN zXt&rOuIiq5P~H{%nuuK5_9s5Vd>r(UK01m@Fh6qt;zgD^>ys#>8I^MZ;8qo&>n9|vW^j_zKt=m0>2bx%glhm^l5!ICFr)s{xMF$yDLM1pexB5aE7N3kFwJ@JvjUC&4iBDVD zO*+Dk&~z@>)lUT#TAsTmD+(z4r9qB>KPNr|+CH6fq%PLz^3_{!$|GRUbql~@`>q;3 
zT=uL}$cI~Y%F=uQr>2b_FZmRJ$3}b#UV4>(A08YM>|Q0xhBMUS7?1tq0@A5DU+3q? z3DgO%Aay2h((4~84d$@HEl69RLyYnrOm_uVmPNy5& zyA6im>@p=hneV3_|F0bF)2ZIZRkg18zP!QnlEv1~_A>Qnuv&n7)y9qQcaM#hswKrr zZ6O$rH9jl|@CF!0U@%bp#^tCXua<%q+(H&k^fn~t*iOYR;m#D_He}`KCs1X#a^;`Y zWFVOGBO%GiMv<-6b?CI@?d$Tm%t~4WxG(oIVi%l)ir4%v4)z52Q(hZdfxZa|6%`#? zx|!%EoZ&P}A)PVT!U2mwe)8cbFHWi@jtn;>6EH9&2I^t4@_g?Y+URS4$Z&OgC;4t7 z1wlDbccJVFdsn9Em)kdRuuVMuH=X_V1Jwt@tI>1w-5RXvjcWC>yj?O8@iwmrnp@X) zNg~-9*8+{N{g}JyZz8;ZGf_v_Sc5swIPhK`(*??eEl;Bt8XvBGcXhpHg{;3dJHKTb zh*h*grnL;~j?v4#il4nwh|XtOJKi{yC#$5;&1R+&m!Ra%3J2Eto>t)qbB!rJlg*)G=ixgX7WvTzZH%50b4n>yg^&Dl(EpGXa*2DH9e53P3$ivT&=TA~ zWz*Ybt}uCXXG@c%^H#+X9i{!_IS*oW@ItBXO|SfX9MM0N7wxDY;`azp2LEJIuy*5; z?`yc%;rRhd@Sltk>Ff~mE0K{A2vx+KO1KN%x2}Jf)nc>b_>~<>kc6Q1nDSz4_U|@( zoEz2{hMgSFKMy?zDu+x5G9$Fj<1+smH9uRv(~>Dw3}&^8W0?yC;y2v;Mui|jiRJ!d zQzRXXOViNv*(q2c_Nsm#owo!1N5gJ$6TbIRaOHE@H+o#q_j&P?P4SL}v;-4_gGji_ z(%)w+e~=Q7>HrMr1_NABm*7gD8bVy;6-?(b`pa*QD0o0kQK(*IyH5>kN8j3CHWxQ` zhOXW(f&SS#mee#j>LTz04DnMVTkISW=+fhF(>bszogBK&S2Kd_w@^EKbp@QJUpB&wct|QGeB!5M)xd zP#TMpl+}3kii0D@rtD88Cn8&2QrOZQ0Ez%YbKBD$^U;RK8wgv<;79JU;?=p5x17dOEl<|+T<&KVhR))!xbb#*{DPFw>6zEIMzonobIjuV~ zhVONg?M4$L-C;V|k{br-npIg84^n_Fd%279?C|q{BNJ^oyTQw71oyz!^$>Ilj#(Rs zO#mg2ce5A&XcefJ5aE7(PLdkprejdi;C>wlI;K>oO#Q8TC{%3YiB{_un=^xf=CnD} zn&sz6_suqH+IM*#b1WUt{prk3fb{+4O4ZO5c0-SDvmziyI9z6H_@}^p(4PbIe}sD< zVFU9Nzhb}tQxBQ1Rv++t^su{ccJTXK;ez&ju!&0mC;4~W`=nV++nWmH5@D-e^>=0% zPF&0RK;8`4!Naq1E8EbYyY&aN1XC4&6ejq#=I)TGlHlBDI~x#49H^2^yiddbvRH8L zKgF7-b+p;yJKN6t00hTpeGZi9iS6vIF9BIhiPd0?Cj8*ZX0Mm0d*J#8D8#)9{+0otqQ&lsp{;0%wPlHYzz6fLEbwzfGWOg*sM1`n-LSo)+SB8bTlOc zP$}!yL87VnkcsC#2wMgQx;qWvYf&OU9KJnW;}pm6pX{>B0Id#!PF%m~xzFs?1SgI5 ze)r`BVYvnLeWr=Zkk@@@nQLhA6Rtnv+mnD&~!5m%wR=Rd7nCJ{v$tefA zHPAFaWgsue@TYT*A_0Q?JA#btxl18>=6nge2AeDXtIr%;S#NRjDUbKw<=CxVVc|!) z)21j$Hwx9RyeS*1yNktzrj72wSJwvg`D4Z=#!Q(ri)p##Oksq<0RfIXJuDoBJ}Som zk4nd-0XHKwl41+h*@4n;>1A)=c^P}-!v7XuHdA_RRN$KJx~%e*je7c?)_|9ilV|Of zUKuySO+L)<3<1zdlXrkXpn})SkYd?pAGGgbq`=t=E&%vZee!xY+G%NE7^oXxavOT? 
zKl}&r-5aw)IL&LOb$XMrrojz9ln_<4VJl-am@J*DF}%lW91rI5HcEj*o&vi&UhNo|aZ z$5Wuyv6H$8f~(ns{&SNorZVg|p|H`Qp<@(%JWcD+=0O zk;n50e|Ai92ctu@o5q)qbz1ev)lsuS(bbNY*u3MP*S4h@83#TBmpeQ=+?!u5jg~gM zZf*~Wsnb%02rWFw^L!8K2kO8F-03xx=oyXlflCRr`YlpbRxP)VZd~*!_&PYg_p~Zu z)FU&VSGw^IaLqtL=BZGvTak_Cpd(9faYk%)*m{QPuK|6iC|zeXx`OTFG31iWMyoar z5~x1c1*Et9gaHa3gp3{VN4YZ`n{U6l$;gMDmDA_6w830`a|ZAAeFs|{W1_XnHRq~M z%lWvx1Xk%rpk$o9Aa_k<+d5Wlmj9cDU@8evkzc()3@cf(KKMDe82C`y8cT_#`*49q zzlthbWk-1$V7$GIrM%*$?=*uYSKb08{=jVp1J9uCakE$V$9i=_ETw>vk`+H0_9n;Q z{bQ6*N||P{nZUc)%Mm6{2gZ{j11?P|2>Ne5m`{p^M&!~ji;}-J5D~GojEOz6m^ z;uS9$%L1I4q18jWyJCZ7dWA zBp(k?&W$$+sWl1Hc2QsEu^NZO=a?e5*>>NwOz&5DYdynBCLjzRN`Oq=;V8dE9kikd zyG*?lCFB*2BBJMFkbm3Ej_B(+&|@lP`!&j;T{4*v#kUSoLmq}|ldu};($(}yq*~qs z-8GenP&1K-JY)go34YJS!{i(bYZTDF3+@OL-6``W6ZmWKQ24EWv#43sx#8=Hp-!4#``vxTVR%VMjW4aZB53kM0VMvX2&Nojb{!m!Uwx z9D54eCSCK}u(}`TaF|NXg|hM9iio*8^p#|s%aboB!vEMf8P45rHiZaap*9hd>pYg< zMN&*Rn}wvL)D0=Z-pyei45!7+w5PAuDj~n7u-A+`=4eOixLl`>9K&2MXD1|Tg%Yy4 zE7DB_A=b?S=YFwDm6|&2NCSh>vm0$irmlpG4i4LV-O1V7Y1qCz58!|=_@9@Icn94+ zqpQ;G?63H*O}y`s@-GZiU6bJ6^=!hQ8o|xph(^4=b?jc~m2p(cL(_XgzjR$f-xoZx zgHg}!$xD%XlVuw&@d33vCBbmMxvW~K!XQ^i;;y^m=8kC5#IrRn`Gin;uzVRz$gdP9 z8MVN*e<4_LZY^|Y^T}=D2CeLMe?_z#LVu?R3VMue+N5oOK8spDHO&+a}V zXArhRfbmr4v`gi1ADa=NADmn)o)e>B+0CkEQD&z~Exnd)ZaAW6hLW9V77rym?nWlp zMRo1C51Hk!kflD9Lc6vn^vc7&A1r7W+@ELT9%gn%Io#8if2)~u#D_aGtED#4yKX+^ zmV$Hw8_{HVwp)t}tlD9KdtYcSwVJFFhhiFmR(rY=Wmtb}LOdQ8u!EWbp$?P#PilwbAt)Vg>h>a4I9;OiEK|g^m)~~oK zYT2F{AF3|?HS(UjV))vl4cF~jRz8|e^%+S4GhH99qBY3U+X%tIRzB7CtJd&z+ba`H zrWhx9L%9G@DJ%hQ+_GmdN$8j~$DP-&-S&&WfwDX_1iGa8QK;Ch zFxJ z%7+DN=Z-}3g&pO@T%l|l0g8EHk25FY7v}qsB~=qy_^P|}+kcvgJjKR0$?qi|fj)e> z{&i8;OqlU85mEhZ-7esZ*#l|AI~Q`U16Swx6u_Rsn$7*K8CnJdZPse$X~nhKkF@np zhs~J#q#swPyW6eSS#IaNb;NMrHxEF%oqCITwhJGzPn_6Q!k{Nf*$6h2T z(n{cNA3ZooEHYqpH*n&x;4H7}ku+XsJHZ$DdDqQHb zeiE`<5qeGLC=Ne0a`Qn0wJmc=$8t(P{=|IJ481y7y|&EW3MRZ<^ouuxzA1p+J_JYW zjdOJgW%FH1!7ogb)aWzoGJ+w+IPoJ^)d~#Y?@}I0yH%*=#p8ds(9O8kWC>H?cpMa6 zmyUOzt9w3!Pd2_cZmws7qGXNOY1hSJq6n3Kkqg^B0bOQGm*a2mNs5c2yh zNOIdk%rAZC67{}=l1GxJ;umsHeS7dEm7q}jd~;o)XN-QS9{gR!)GeDsEmJ>cpbF75 zkRc3#^Qip}?w6gc z(T?#$&B3_FX3@Zo09QPn9c~w#cQ5a!W1rydXfxuEFlZ$`zrMK ziYem#Gg>zzX~1zuDl=Gaf9u0lg&11Jld00nk8cAwgYb+qyn>-9>GVqZ(S^c;d6M|$ zpbhQ3xSg?Cs zb-EzwZdO>~Pfz?u(F;?mLs50R2sb zW0}0!HN$lRFN<-<)s#htfPk1kOhfJ}z=*w#wXX!k;aC$t&33|yed73h(f$T<=vcb3 zU@3Ck40^KbHIXg8YO^>WAf?(2x<+gw|DiyVh9|$e2k=57ZgO^x)4&gL9ow`Sc=L3s z26mVj7i*^}@|3+o1WX2VbVqX_Lq>;+kI+d8BO)GJo2j-rWj3}4s%EY_YBgL(H zkhySd(qZX54sfxd(luNY)(>2g&3+bf0zA|_Cd*yTR}qoGA_coL<7L6wtGR0@zK~7S zgek`6=wRCnnIMXutoNPL2EbryCS%`Va&C?9QR`GKFq4)`HrA5*rvo1!9Vi{co@#ye z60HU9vwFUU3j+Q4u2o<^BO~nmotn`&kgpl6an{sx!vDBuC-O~`lt1ZPQ8Ex{Ql|Oh zHDQyB9}A>uj5*JE6uw%$OYXQgUI z{}}A$XGhfnrGJ`3Gr%PK%Yo?RiWZ~sJH7$1>e-N^M>AgBR)chwFxQ!LR3K1#Q1DG4 zyZFIUh)2orrY17em{jqsGvtbkNBd6LXz@i5sO!y-b5TpHd!<6?Nua(AFkFuh+m*ba zqh*?gbUSGzeRFCCem-PSYLTUe*QjgMU`ZW!3s}((W#P=vh;~>8O>Lj=} zfAf(09H@>npxLT<+~}0455j=J3CKng%*t^pv0m6au@ZqBZRXA6y93jJ-Vz$iUMwIGgw_P8<;Swxsq`{_X~x zj-O|kv_6R!+US3@bEQE|Wmz~SgQzspg0d(97f@D1SR2xSY$76ptu4wHkxfE^4a`83 zU?Mb&wkViD0 zZq!!U$EzVPzl2M|;cDCK;zq}b-!b*lUsN9RkiOF;0BC_+gZ^|8(NZu@ z5j*NM)gl7T8@t1X!gWpe)(*brCN@cJQN+iX$x?Sps=Pfd6MGp_fMw#zWG=S;}HsS-7AGBvB784t>WvR* zAxrVm%WW~qK26XH{?_G57xFUMzrC(OVNBy$fLF;B9@e=vP{oWDxM>l6s7HuF5Ua)x z6zS?u0!oMOZgc%I7xby%?$rX3<@9*5<+L9H4Q^t#>RFN|w#t!jVoiA1ds`@Cy!j>j zcY!T|rIZ@}y6b=;bs|#rA6){Ewr{actcY@9jD$TxaUTe-YbrST=vj`QN)F-n0A}#E8blTF6 zshcpjdJ|*puAfJBhP?R%_bSvyf5>_Y8Ks(nOPV|%Zq?Hhs39hm-n)5KH~D6wIk0M} z&7Z{of?B$0VlpQiG$Vyo05(KhQE4l;Gd;nJo*hV`>~$a;bMG4PSQ>xeP2WjagVR)g 
zNA_g=9Ie;Ku3jeTG<5jUnrb)Q?YiVO&u($K4l+-NX`Pt#c|&b}=>~F_&!b4!m%Knq zV`tHvcM)U&H1F4lCiQH}00;mlV?x{<$g{E~e z4wWkf`hVonr?af&!2;4y*5F86XkYXTaP$p}Ia#+|`bY-o1Pj2HaSnX!4lP|9*E73W zt5HF49Ca=wh1wX;WT%@;|NCx$(MLM0tJXyEX^RA; zm|pHtF@>Q7i3v-{>b}HAP>m7HpLY!^#)J_eZYp&a=cKq1tZVj;S zG&7SSukVZi0rIIeze3!_ht`IHxI81j?o$5}{L4K5IVfz8sS`i)RXF-`rPxG#21Khp zHY(^qFV-73`bW>HZx^h9+-nwHW^dn#+@8N17?p8~%}@ zG4cMfrj23X2~aURbwzc2_E=>DKbs?>FAf5RTpM>XW~>4b>VOhQIfi4OX!?e@n!ui3 zU1W5Yw><+Tk;XHC7MRoW3=Uz4-4xbwCH_VUv)mUBOj3B8tTY0Dm!0fix%jk84%Gl> z1aZ8Z$9$HG76DIgI(x62n3T6NPiugmmDr3L&?e$R@peyZHt20Z$K!OJ+*i0gk)2qm zye14#C9t0qS+aO1^=TE=f(0!Ho8h*rL1IqWOm<~awBs8M6?YO;B)fGh9^zyM55!vz zo>ry*=5_pqU|p|IS&QsUTra~GwUrmWSxbEYFxPW$>Z19FicKNUBgZ2VCDrWg3IZ?U zHiio%3};`8b8imL(wTYiP)HxkXrozurcOoHa6c z8I>SQzjohxxTpUbqx)W;(($8&-^WNJzhLTtFAfi1YOo?F${V#%5`^j`L1|isFj>g` zJ&+VgW9dPY+@X)iZ-|^6su7Yag3qu(b1~O|iz}u;x=Q}oU_qwHBRIWG#2MRp*W)4Q zZaEhoNv=5qQ8ja5qI>VoHt7t<%rRprU~$I_3PR>Q)srsg zzH;P)XlXULoAJH=@#zC(+J(l%7Rca{xe)b!Yo5PZgE=|6^kttw=0RP*#&)B**FSe$ zUQ_nr$iW8Fd1AGBrg#X_WB?(L$bv}h4ergM#5aZ@lh}(XuMX?)gBX5cX<*L>{Z{W> jV*!RG{a?R^AXkkpbmZ?_$m!_^O2aJ8&zjLqu7v&zvzu!N literal 0 HcmV?d00001 diff --git a/example/deep_learning_framework/README_zh.md b/example/deep_learning_framework/README_zh.md new file mode 100644 index 0000000..ef8e58e --- /dev/null +++ b/example/deep_learning_framework/README_zh.md @@ -0,0 +1,218 @@ +# Tensorflow Lite 接入NNRt Delegate Demo开发指南 + +## 概述 + +### 功能简介 +- 神经网络运行时部件(NNRt)是跨设备的AI运行时框架,作为端侧推理框架和专用加速芯片的中间桥梁,为端侧推理框架提供了统一的Native接口; +- 本demo旨在介绍上层AI业务如何利用NNRt在专有芯片上加速推理,使能OpenHarmony社区生态; +- 本demo根据用户输入参数(模型、标签、模型输入shape、循环浮点推理次数、是否允许动态尺寸推理、以及是否打印结果等)完成标签分类模型推理,用户可通过打印信息观察在不同条件下的模型推理性能、精度等KIP。 + +### 基本概念 +在开发前,开发者需要先了解以下概念,以便更好地理解全文内容: +- NNRt: Neural Network Runtime,神经网络运行时,是本指导主要介绍的部件。 +- OHOS:OpenHarmony Operating System,开源鸿蒙操作系统。 + +### 约束与限制 +- 系统版本:OpenHarmonyOS 3.2及以上 +- 开发环境:Ubuntu 18.04及以上 +- 接入设备:OpenHarmony定义的标准设备 +- 其他开发依赖: + - tensorflow-lite.so及其依赖库,目前完成在tensorflow lite 2.6版本上的测试; + - NNRt库libneural_network_runtime.z.so; + - TensorFlow Lite头文件:https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite; + - mobilenetv2.tflite模型(https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz); + - 标签文件labels.txt; + - 测试图片grace_hopper.bmp; + +### 运作机制 +
+![NNRt接入TFLite原理图](Principle.png)
+
+  - 用户调用TFLite的BuildFromFile接口完成初始构图;
+  - 用户设置自定义的参数options,应用于创建NnrtDelegate;
+  - 用户创建DelegateProviders,并调用DelegateProviders的CreateAllRankedDelegates接口创建NnrtDelegate,创建NnrtDelegate过程中dlopen打开NNRt的动态库,并加载API,返回delegate;
+  - 用户调用ModifyGraphWithDelegate接口完成Node替换,该步骤分为以下四步;
+    - Initialize初始化NnrtDelegate;
+    - 判断图中各node是否支持NnrtDelegate,返回支持的node集合;
+    - 调用TFLiteRegistration注册NnrtDelegate,并初始化init, prepare, invoke成员函数指针,指向delegateKernel的Init, Prepare和run函数方法;
+    - 将TensorFlow Lite中的node替换为已注册的NNRt delegate kernel,并调用Init完成构图步骤;
+  - 用户调用AllocateTensors,完成内存分配和图编译,其中支持delegate的node会转到delegateKernel的prepare完成编译,不支持delegate的node则仍调用原有tflite node的prepare完成编译;
+  - 用户调用Invoke完成图执行,上述调用顺序可参考下文的示意代码;
+
+### 开发流程
+
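+以下为一个示意性的最小调用示例,按上文"运作机制"中的顺序串联各步骤,便于理解整体流程(示例中的函数名BuildAndRunWithNnrt、头文件路径及options取值均为假设写法,非demo源码,具体接口请以实际代码为准):
+```cpp
+#include <memory>
+
+#include "tensorflow/lite/interpreter.h"
+#include "tensorflow/lite/kernels/register.h"
+#include "tensorflow/lite/model.h"
+
+#include "nnrt_delegate.h"
+
+// 示意:省略了错误处理与delegate生命周期管理(demo中通过TfLiteDelegatePtr管理)
+std::unique_ptr<tflite::Interpreter> BuildAndRunWithNnrt(const char* modelPath)
+{
+    // 1. BuildFromFile完成初始构图
+    auto model = tflite::FlatBufferModel::BuildFromFile(modelPath);
+    tflite::ops::builtin::BuiltinOpResolver resolver;
+    std::unique_ptr<tflite::Interpreter> interpreter;
+    tflite::InterpreterBuilder(*model, resolver)(&interpreter);
+
+    // 2. 设置自定义options并创建NnrtDelegate(内部dlopen NNRt动态库并加载API)
+    tflite::NnrtDelegate::Options options;
+    options.allowDynamicDimensions = true;  // 假设:开启动态尺寸推理
+    auto* delegate = new tflite::NnrtDelegate(options);
+
+    // 3. ModifyGraphWithDelegate完成Node替换
+    interpreter->ModifyGraphWithDelegate(delegate);
+
+    // 4. AllocateTensors完成内存分配和图编译
+    interpreter->AllocateTensors();
+
+    // 5. Invoke完成图执行
+    interpreter->Invoke();
+    return interpreter;
+}
+```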
+
+### 开发步骤
+本节主要描述NNRt接入TFLite的TFLite-delegate代理机制,重点介绍TFLite调用delegate的流程和delegate对接NNRt的方式。
+TensorFlow Lite Delegate有两个基类DelegateProvider、TfLiteDelegate,本节主要描述继承这两个基类得到的子类NnrtDelegate和NnrtDelegateProvider。
+
+本demo主要文件目录结构如下图:
+```text
+.
+├── CMakeLists.txt
+├── delegates
+│   └── nnrt_delegate
+│       ├── CMakeLists.txt
+│       ├── nnrt_delegate.cpp
+│       ├── nnrt_delegate.h
+│       ├── nnrt_delegate_kernel.cpp
+│       ├── nnrt_delegate_kernel.h
+│       ├── nnrt_delegate_provider.cpp
+│       ├── nnrt_op_builder.cpp
+│       ├── nnrt_op_builder.h
+│       ├── nnrt_utils.cpp
+│       ├── nnrt_utils.h
+│       └── tensor_mapping.h
+├── label_classify
+│   ├── CMakeLists.txt
+│   ├── label_classify.cpp
+│   └── label_classify.h
+├── nnrt
+│   ├── CMakeLists.txt
+│   ├── nnrt_implementation.cpp
+│   └── nnrt_implementation.h
+└── tools
+    ├── bitmap_helpers.cpp
+    ├── bitmap_helpers.h
+    ├── get_topn.h
+    ├── log.h
+    ├── utils.cpp
+    └── utils.h
+```
+1. 创建Tensorflow Lite NnrtDelegate类
+    - Tensorflow Lite NNRt Delegate使TensorFlow Lite模型能够运行在NNRt框架(https://gitee.com/openharmony/neural_network_runtime)上,从而在OHOS设备上获得更快的模型推理速度;
+    - nnrt_delegate依赖nnrt_delegate_kernel,nnrt_delegate_kernel(将支持替换的TensorFlow Lite模型中的operators替换成NNRt中的operators)依赖nnrt_op_builder(给每个operators设置输入输出tensor和operation属性),共同完成nnrt_delegate的自定义。
+
+
+2. 创建NnrtDelegateProvider
+    - NnrtDelegateProvider依赖nnrt_implementation(用于加载libneural_network_runtime.z.so中的Api)和nnrt_delegate(用于创建子类NnrtDelegate对象),完成与TFLite的对接;
+
+    - 注册NnrtDelegateProvider
+    ```cpp
+    REGISTER_DELEGATE_PROVIDER(NnrtDelegateProvider);
+    ```
+
+    - 实现CreateTfLiteDelegate主要有以下几步
+    ```cpp
+    NnrtDelegate::Options options;
+
+    const auto* nnrtImpl = NnrtImplementation();
+    if (!nnrtImpl->nnrtExists) {
+        TFLITE_LOG(WARN) << "NNRT acceleration is unsupported on this platform.";
+        return delegate;
+    }
+
+    Interpreter::TfLiteDelegatePtr TfLiteDelegatePtr(new (std::nothrow) NnrtDelegate(nnrtImpl, options),
+        [](TfLiteDelegate* delegate) { delete reinterpret_cast<NnrtDelegate*>(delegate); });
+    ```
+
+3. label_classify.cpp中加载NnrtDelegate
+    ```cpp
+    interpreter->ModifyGraphWithDelegate(std::move(delegate.delegate))
+    ```
+
+### 调测命令
+1. 编译生成Tensorflow Lite库及其依赖库
+    请参考Tensorflow Lite交叉编译指南(https://www.tensorflow.org/lite/guide/build_cmake_arm), 同时在```tensorflow/lite/CMakeLists.txt```中增加以下内容:
+    ```text
+    # TODO: TFLite External Delegate
+    list(APPEND TFLITE_EXTERNAL_DELEGATE_SRC
+      ${TFLITE_SOURCE_DIR}/tools/delegates/delegate_provider.cc
+      # ${TFLITE_SOURCE_DIR}/tools/delegates/external_delegate_provider.cc
+      ${TFLITE_SOURCE_DIR}/tools/tool_params.cc
+      ${TFLITE_SOURCE_DIR}/tools/command_line_flags.cc
+    )
+    ```
+    ```text
+    target_link_libraries(tensorflow-lite
+      PUBLIC
+        Eigen3::Eigen
+        NEON_2_SSE
+        absl::flags
+        absl::hash
+        absl::status
+        absl::strings
+        absl::synchronization
+        absl::variant
+        farmhash
+        fft2d_fftsg2d
+        flatbuffers
+        gemmlowp
+        ruy
+        ${CMAKE_DL_LIBS}
+        ${TFLITE_TARGET_DEPENDENCIES}
+    )
+    ```
+2. 编译生成NNRt库libneural_network_runtime.z.so
+    请参考编译指导(https://gitee.com/openharmony/build),编译命令如下
+    ```shell
+    ./build.sh --product-name rk3568 --ccache --jobs=16 --build-target=neural_network_runtime
+    ```
+3. 
用cmake编译北向demo + - 将TensorFlow Lite头文件(https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite)和编译生成的TensorFlow Lite库,分别放在```deep_learning_framework/lib_3rd_nnrt_tflite/include/tensorflow/lite/```和```deep_learning_framework/lib_3rd_nnrt_tflite/com/arm64-v8a/lib/```下; + - 指定ohos的cmake, ohos.toolchain.cmake路径,在```foundation/ai/neural_network_runtime/example/cmake_build/build_ohos_tflite.sh```中替换以下两行; + ```shell + ./tool_chain/native/build-tools/cmake/bin/cmake \ + -DCMAKE_TOOLCHAIN_FILE=./tool_chain/native/cmake_build/cmake/ohos.toolchain.cmake \ + ``` + - 进入```foundation/ai/neural_network_runtime/example/cmake_build```: + - 如果需要在arm32架构的CPU上运行: + - 修改```tflite/CMakeLists.txt``` + ```text + set(CMAKE_CXX_FLAGS "-pthread -fstack-protector-all -fPIC -D_FORTIFY_SOURCE=2 -march=armv7-a") + ``` + - 执行编译命令 + ```shell + bash build_ohos_tflite.sh armeabi-v7a + ``` + - 如果需要在arm64架构的CPU上运行: + - 修改```tflite/CMakeLists.txt``` + ```text + set(CMAKE_CXX_FLAGS "-pthread -fstack-protector-all -fPIC -D_FORTIFY_SOURCE=2 -march=armv8-a") + ``` + - 执行编译命令 + ```shell + bash build_ohos_tflite.sh arm64-v8a + ``` + - 在```example/deep_learning_framework/```目录下创建lib和output两个文件夹: + ```shell + mkdir lib output + ``` + - 进入```foundation/ai/neural_network_runtime/example/cmake_build```, 执行链接命令: + ```shell + make + ``` + - 北向demo成功编译完成后会在```deep_learning_framework/lib```生成libnnrt_delegate.so和libnnrt_implementation.so, 在```deep_learning_framework/output```下生成label_classify可执行文件,目录结构体如下所示。 + + ```text + deep_learning_framework + ├── lib + │   ├── libnnrt_delegate.so # 生成的TensorFlow Lite nnrt delegate库 + │   └── libnnrt_implementation.so # 生成的nnrt在TensorFlow Lite中接口实现库 + └── output + └── label_classify # 生成的可执行文件 + ``` + +4. 在开发板上运行北向demo + - 将步骤1生成的libnnrt_implementation.so, libnnrt_delegate.so和可执行文件label_classify, libneural_network_runtime.z.so, tensorflow-lite.so及其依赖的库, mobilenetv2.tflite模型, 标签labels.txt, 测试图片grace_hopper.bmp推送到开发板上: + ```shell + # 假设上述待推送文件均放在push_files/文件夹下 + hdc_std file send push_files/ /data/demo/ + ``` + - 进入开发板,执行demo前需要添加环境变量,文件执行权限等: + ```shell + # 进入开发板 + hdc_std shell + + # 进入推送文件目录,并增加可执行文件权限 + cd /data/demo + chmod +x ./label_classify + + # 添加环境变量 + export LD_LIBRARY_PATH=/data/demo:$LD_LIBRARY_PATH + + # 执行demo,-m tflite模型, -i 测试图片, -l 数据标签, -a 1表示使用nnrt, 0表示不使用nnrt推理,-z 1 表示打印输出张量大小的结果 + ./label_classify -m mobilenetv2.tflite -i grace_hopper.bmp -l labels.txt -a 1 -z 1 + ``` + +### 开发实例 +完整[Demo实例](xxx, Demo暂时还在黄区代码仓,超链接需等Demo开源后补充)可以参考社区实现。 diff --git a/example/deep_learning_framework/cmake_build/build_ohos_tflite.sh b/example/deep_learning_framework/cmake_build/build_ohos_tflite.sh new file mode 100644 index 0000000..3dcc0ea --- /dev/null +++ b/example/deep_learning_framework/cmake_build/build_ohos_tflite.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +set -e + +function help_info() { + echo "arm64-v8a(armeabi-v7a) means the CPU architecture is 64-bit(32-bit), the compile command like the following:" + echo "bash build_ohos_tflite.sh arm64-v8a" +} + +function build() { + echo "$1" + ./tool_chain/native/build-tools/cmake/bin/cmake \ + -DCMAKE_TOOLCHAIN_FILE=./tool_chain/native/build/cmake/ohos.toolchain.cmake \ + -DOHOS_ARCH=$1 \ + -DOHOS_PLATFORM=OHOS \ + -DCMAKE_BUILD_TYPE=RELEASE \ + -DBUILD_SHARED_LIBS=true \ + -DOHOS_STL=c++_static \ + -DCMAKE_BUILD_TYPE=Debug \ + .. +} + +if [ "$#" != 1 ]; then + echo "Incorrect command, please pass the correct number of parameters to the compile command." + help_info + exit 1; +fi + +if [ "$1" == "arm64-v8a" ]; then + build arm64-v8a +elif [ "$1" == "armeabi-v7a" ]; then + build armeabi-v7a +else + echo "Incorrect CPU architecture parameter or missing setting it, please pass the correct compile command." + help_info +fi + diff --git a/example/deep_learning_framework/tflite/CMakeLists.txt b/example/deep_learning_framework/tflite/CMakeLists.txt new file mode 100644 index 0000000..e89cbba --- /dev/null +++ b/example/deep_learning_framework/tflite/CMakeLists.txt @@ -0,0 +1,25 @@ +# +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +set(NNRT_INTERFACE_HOME ${LOCAL_DIRECTORY_PATH}/tflite/nnrt) +set(NNRT_DELEGATE_HOME ${LOCAL_DIRECTORY_PATH}/tflite/delegates/nnrt_delegate) +set(NNRT_DEMO_HOME ${LOCAL_DIRECTORY_PATH}/tflite/label_classify) +set(TFLITE_LIB_PATH ${LOCAL_DIRECTORY_PATH}/lib_3rd_nnrt_tflite) + +add_subdirectory(${NNRT_INTERFACE_HOME}) +add_subdirectory(${NNRT_DELEGATE_HOME}) +add_subdirectory(${NNRT_DEMO_HOME}) + + diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/CMakeLists.txt b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/CMakeLists.txt new file mode 100644 index 0000000..ee5fa7f --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/CMakeLists.txt @@ -0,0 +1,34 @@ +# +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +set(TFLITE_PATH ${LOCAL_DIRECTORY_PATH}/lib_3rd_nnrt_tflite) + +LINK_DIRECTORIES(${TFLITE_PATH}/com/arm64-v8a/lib/) + +# Header path +set(OHOS_INC ${LOCAL_DIRECTORY_PATH}/../../interfaces/kits/c) +set(TOOLS_INC ${LOCAL_DIRECTORY_PATH}/tflite/tools) +set(TFLITE_INC ${LOCAL_DIRECTORY_PATH}/lib_3rd_nnrt_tflite/include) +include_directories(${LOCAL_DIRECTORY_PATH} ${NNRT_DELEGATE_HOME} ${TFLITE_INC} ${OHOS_INC} ${TOOLS_INC}) + +# Scr path +file(GLOB NNRT_DELEGATE_SRCS "${NNRT_DELEGATE_HOME}/*.cpp") + +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${LOCAL_DIRECTORY_PATH}/lib) + +add_library(nnrt_delegate SHARED ${NNRT_DELEGATE_SRCS}) +target_link_libraries(nnrt_delegate -ltensorflow-lite nnrt_implementation) + + diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate.cpp b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate.cpp new file mode 100644 index 0000000..9528d75 --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate.cpp @@ -0,0 +1,316 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnrt_delegate.h" + +#include "tensorflow/lite/util.h" +#include "tensorflow/lite/context_util.h" +#include "tensorflow/lite/minimal_logging.h" + +#include "nnrt_utils.h" +#include "nnrt_delegate_kernel.h" + +namespace tflite { +const char* g_tfliteNnrtDelegateName = "TfLiteNnrtDelegate"; +constexpr int32_t TFLITE_NNRT_DELEGATE_VERSION = 1; + +NnrtDelegate::Data::Data(const NnrtApi* nnrt) : nnrt(nnrt) {} + +NnrtDelegate::Data::~Data() {} + +void NnrtDelegate::NnrtDelegateConstructorImpl(const Options& options) +{ + m_delegateData.acceleratorName = options.acceleratorName; + m_delegateData.cacheDir = options.cacheDir; + m_delegateData.modelToken = options.modelToken; + m_delegateData.enableFp16 = options.enableFp16; + m_delegateData.executionPriority = options.executionPriority; + m_delegateData.executionPerformance = options.executionPerformance; + m_delegateData.allowDynamicDimensions = options.allowDynamicDimensions; + m_delegateData.maxNumberDelegatedPartitions = options.maxNumberDelegatedPartitions; + m_delegateData.maxCompilationTimeoutDurationNs = options.maxCompilationTimeoutDurationNs; + m_delegateData.maxExecutionTimeoutDurationNs = options.maxExecutionTimeoutDurationNs; + m_delegateData.maxExecutionLoopTimeoutDurationNs = options.maxExecutionLoopTimeoutDurationNs; + + Prepare = DoPrepare; + CopyFromBufferHandle = DoCopyFromBufferHandle; + CopyToBufferHandle = DoCopyToBufferHandle; + FreeBufferHandle = DoFreeBufferHandle; + data_ = &m_delegateData; + + // NNRT support dynamic shape feature. 
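+    // Both flags below are standard TfLiteDelegateFlags: the first keeps tensors with
+    // dynamic shapes eligible for delegation, and the second asks TFLite to re-propagate
+    // tensor shapes into the delegate kernels after an input is resized.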
+ flags |= kTfLiteDelegateFlagsAllowDynamicTensors; + flags |= kTfLiteDelegateFlagsRequirePropagatedShapes; +} + +NnrtDelegate::NnrtDelegate(const NnrtApi* nnrt) : NnrtDelegate(nnrt, Options()) {} + +NnrtDelegate::NnrtDelegate(const Options& options) : NnrtDelegate(NnrtImplementation(), options) {} + +NnrtDelegate::NnrtDelegate(const NnrtApi* nnrt, const Options& options) + : TfLiteDelegate(TfLiteDelegateCreate()), m_delegateData(nnrt) +{ + NnrtDelegateConstructorImpl(options); +} + +NnrtDelegate::NnrtDelegate() : NnrtDelegate(Options()) {} + +TfLiteStatus NnrtDelegate::GetOptions(const TfLiteDelegate* pDelegate, Options& options) +{ + // Caller guarantees that parameters are legal + auto pDelegateData = static_cast(pDelegate->data_); + options.acceleratorName = pDelegateData->acceleratorName; + options.cacheDir = pDelegateData->cacheDir; + options.modelToken = pDelegateData->modelToken; + options.enableFp16 = pDelegateData->enableFp16; + options.executionPriority = pDelegateData->executionPriority; + options.executionPerformance = pDelegateData->executionPerformance; + options.allowDynamicDimensions = pDelegateData->allowDynamicDimensions; + options.maxNumberDelegatedPartitions = pDelegateData->maxNumberDelegatedPartitions; + options.maxCompilationTimeoutDurationNs = pDelegateData->maxCompilationTimeoutDurationNs; + options.maxExecutionTimeoutDurationNs = pDelegateData->maxExecutionTimeoutDurationNs; + options.maxExecutionLoopTimeoutDurationNs = pDelegateData->maxExecutionLoopTimeoutDurationNs; + options.version = pDelegateData->version; + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegate::DoCopyFromBufferHandle(TfLiteContext* context, + TfLiteDelegate* delegate, TfLiteBufferHandle bufferHandle, TfLiteTensor* tensor) +{ + return kTfLiteError; +} + +TfLiteStatus NnrtDelegate::DoCopyToBufferHandle(TfLiteContext* context, + TfLiteDelegate* delegate, TfLiteBufferHandle bufferHandle, TfLiteTensor* tensor) +{ + return kTfLiteError; +} + +void NnrtDelegate::DoFreeBufferHandle(TfLiteContext* context, + TfLiteDelegate* delegate, TfLiteBufferHandle* handle) +{ + return; +} + +TfLiteStatus NnrtDelegate::LimitDelegatedPartitions(int32_t maxPartitions, + std::vector partitionParamsArray, std::vector& nodesToDelegate) +{ + int32_t numPartitions = partitionParamsArray.size(); + if ((maxPartitions <= 0) || (numPartitions <= maxPartitions)) { // no limit or not exceed limit + return kTfLiteOk; + } + + int32_t numberDelegatedPartitions = std::count_if( + partitionParamsArray.begin(), partitionParamsArray.end(), + [nodesToDelegate](const TfLiteDelegateParams& partitionParams) { + return std::find(nodesToDelegate.begin(), nodesToDelegate.end(), + partitionParams.nodes_to_replace->data[0]) != nodesToDelegate.end(); + }); + // Adapt maxPartitions to limit delegate paritions, sort and abandon the low-ranking nodes. 
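+    // Partitions are ranked by node count: the array is sorted in descending order of
+    // nodes_to_replace->size and only the first maxPartitions entries are kept. For
+    // example, with partition sizes {5, 3, 2} and maxPartitions = 2, the partitions of
+    // size 5 and 3 stay delegated and the remaining nodes fall back to TFLite kernels.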
+ if (numberDelegatedPartitions > maxPartitions) { + std::sort(partitionParamsArray.begin(), partitionParamsArray.end(), + [](const TfLiteDelegateParams& left, const TfLiteDelegateParams& right) -> bool { + return left.nodes_to_replace->size > right.nodes_to_replace->size; + }); + + nodesToDelegate.clear(); + + for (int32_t i = 0; i < maxPartitions; ++i) { + const TfLiteDelegateParams& partitionParams = partitionParamsArray[i]; + nodesToDelegate.insert(nodesToDelegate.end(), + partitionParams.nodes_to_replace->data, + partitionParams.nodes_to_replace->data + + partitionParams.nodes_to_replace->size); + } + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegate::GetSupportedNodes(TfLiteContext* context, + TfLiteDelegate* delegate, std::vector& supportedNodes) +{ + // Caller guarantees that parameters are legal + TfLiteIntArray* executionPlan = nullptr; + TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &executionPlan)); + TF_LITE_ENSURE_EQ(context, executionPlan != nullptr, true); + + // Check for every node if it is supported + TfLiteNode* node = nullptr; + TfLiteRegistration* registration = nullptr; + for (auto nodeIndex : TfLiteIntArrayView(executionPlan)) { + node = nullptr; + registration = nullptr; + TF_LITE_ENSURE_STATUS(context->GetNodeAndRegistration(context, nodeIndex, &node, ®istration)); + if (NnrtDelegateKernel::Validate(registration->builtin_code)) { + supportedNodes.emplace_back(nodeIndex); + } else { + TFLITE_LOG_PROD(TFLITE_LOG_WARNING, + "[NNRT-DELEGATE] Get unsupportted node: %d.", registration->builtin_code); + } + } + + return kTfLiteOk; +} + +void NnrtDelegate::GetDelegateKernelRegistration(TfLiteDelegate* delegate, TfLiteRegistration& nnrtDelegateKernel) +{ + // Caller guarantees that parameters are legal + nnrtDelegateKernel.profiling_string = nullptr; + nnrtDelegateKernel.builtin_code = kTfLiteBuiltinDelegate; + nnrtDelegateKernel.custom_name = g_tfliteNnrtDelegateName; + nnrtDelegateKernel.version = TFLITE_NNRT_DELEGATE_VERSION; + + nnrtDelegateKernel.init = [](TfLiteContext* context, const char* buffer, size_t length) -> void* { + if (buffer == nullptr) { + return nullptr; + } + + const TfLiteDelegateParams* params = reinterpret_cast(buffer); + auto* delegateData = static_cast(params->delegate->data_); + NnrtDelegateKernel* state = new (std::nothrow) NnrtDelegateKernel(delegateData->nnrt); + if (state == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to create NnrtDelegateKernel instance."); + return state; + } + + TfLiteStatus status = state->Init(context, params); + if (status != kTfLiteOk) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to init NnrtDelegateKernel."); + delete state; + state = nullptr; + } + return state; + }; + + nnrtDelegateKernel.free = [](TfLiteContext* context, void* buffer) -> void { + if (buffer != nullptr) { + delete static_cast(buffer); + buffer = nullptr; + } + }; + + nnrtDelegateKernel.prepare = [](TfLiteContext* context, TfLiteNode* node) -> TfLiteStatus { + if (node == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to prepare delegate kernels, the node is nullptr."); + return kTfLiteError; + } + + NnrtDelegateKernel* state = reinterpret_cast(node->user_data); + return state->Prepare(context, node); + }; + + nnrtDelegateKernel.invoke = [](TfLiteContext* context, TfLiteNode* node) -> TfLiteStatus { + if (node == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to invoke delegate kernels, the node is nullptr."); + return kTfLiteError; + } + + NnrtDelegateKernel* state = reinterpret_cast(node->user_data); + 
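+        // user_data carries the NnrtDelegateKernel instance created by the init
+        // callback above; invocation is simply forwarded to that kernel.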
return state->Invoke(context, node); + }; +} + +TfLiteStatus NnrtDelegate::CheckDeviceValid(TfLiteContext* context, TfLiteDelegate* delegate) +{ + // Caller guarantees that parameters are legal + auto* delegateData = static_cast(delegate->data_); + if (delegateData == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Delegate data not be found."); + return kTfLiteDelegateDataNotFound; + } + + const NnrtApi* nnrt = delegateData->nnrt; + if (nnrt == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to get nnrt instance."); + return kTfLiteError; + } + + NnrtDelegate::Options delegateOptions; + TF_LITE_ENSURE_STATUS(NnrtDelegate::GetOptions(delegate, delegateOptions)); + + if (tflite::IsUseTargetDevice(delegateOptions)) { + size_t nnrtDevice; + TF_LITE_ENSURE_STATUS(GetTargetDevice(context, delegate, nnrt, nnrtDevice)); + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegate::DoPrepare(TfLiteContext* context, TfLiteDelegate* delegate) +{ + if ((context == nullptr) || (delegate == nullptr)) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-DELEGATE] Input TFLite-Context or TFLite-Delegate is nullptr."); + return kTfLiteError; + } + + auto* delegateData = static_cast(delegate->data_); + const NnrtApi* nnrt = delegateData->nnrt; + + // Do not delegate nodes_ if NN API is unavailable. + if (!nnrt->nnrtExists) { + return kTfLiteOk; + } + + // Check devices validity + TF_LITE_ENSURE_STATUS(CheckDeviceValid(context, delegate)); + + // Get supportted nodes by tflite. + // We don't care about all nodes_, we only care about ones in the current plan. + std::vector supportedNodes; + GetSupportedNodes(context, delegate, supportedNodes); + + // If there are no delegated nodes, short-circuit node replacement. + if (supportedNodes.empty()) { + TFLITE_LOG_PROD(TFLITE_LOG_INFO, "[NNRT-DELEGATE] supportted node list is empty."); + return kTfLiteOk; + } + + static TfLiteRegistration nnrtDelegateKernel; + GetDelegateKernelRegistration(delegate, nnrtDelegateKernel); + + std::vector nodesToDelegate(supportedNodes); + int32_t numPartitions; + TfLiteDelegateParams* paramsArray = nullptr; + auto supportedNodesArray = BuildTfLiteIntArray(supportedNodes); + TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning( + context, supportedNodesArray.get(), ¶msArray, &numPartitions)); + + NnrtDelegate::Options delegateOptions; + TF_LITE_ENSURE_STATUS(NnrtDelegate::GetOptions(delegate, delegateOptions)); + const auto partitionParamsArray = std::vector(paramsArray, paramsArray + numPartitions); + TF_LITE_ENSURE_STATUS(LimitDelegatedPartitions( + delegateOptions.maxNumberDelegatedPartitions, partitionParamsArray, nodesToDelegate)); + + auto nodesToDelegateArray = BuildTfLiteIntArray(nodesToDelegate); + if (nodesToDelegateArray->size == 0) { + TFLITE_LOG_PROD(TFLITE_LOG_INFO, "[NNRT-DELEGATE] No node to delegate."); + return kTfLiteOk; + } else { + // Request TFLite to partition the graph and make kernels + // for each independent node sub set a new nnrtDelegateKernel. + return context->ReplaceNodeSubsetsWithDelegateKernels(context, + nnrtDelegateKernel, nodesToDelegateArray.get(), delegate); + } +} + +// Return a singleton NNRT Delegate that can check ops supported. 
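+// Typical use is to hand the returned delegate to the interpreter, e.g. (illustrative
+// sketch; the interpreter object is assumed, not part of this file):
+//   interpreter->ModifyGraphWithDelegate(NnrtDelegateSingleton());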
+TfLiteDelegate* NnrtDelegateSingleton() +{ + static NnrtDelegate delegate; + return &delegate; +} +} // namespace tflite \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate.h b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate.h new file mode 100644 index 0000000..681e069 --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_LITE_DELEGATES_NNRT_DELEGATE_H +#define TENSORFLOW_LITE_DELEGATES_NNRT_DELEGATE_H + +#include +#include + +#include "neural_network_runtime.h" + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/delegates/serialization.h" + +#include "../nnrt/nnrt_implementation.h" + +namespace tflite { +namespace delegate { +namespace nnrt { +class NnrtDelegateKernel; +} // namespace nnrt +} // namespace delegate + +using tflite::delegate::nnrt::NnrtDelegateKernel; + +// TFliteDelegate to interface with NNRT. +class NnrtDelegate : public TfLiteDelegate { +public: + struct Options { + OH_NN_PerformanceMode executionPerformance = OH_NN_PERFORMANCE_NONE; + std::string acceleratorName; + std::string cacheDir; + std::string modelToken; + OH_NN_Priority executionPriority = OH_NN_PRIORITY_MEDIUM; + int32_t maxNumberDelegatedPartitions = -1; + uint64_t maxCompilationTimeoutDurationNs = 0; + uint64_t maxExecutionTimeoutDurationNs = 0; + uint64_t maxExecutionLoopTimeoutDurationNs = 0; + // allow fp32 compuation to be run in fp16. + bool enableFp16 = false; + bool allowDynamicDimensions = false; + + uint32_t version {0}; + }; + + // Uses default options. + NnrtDelegate(); + + // The ownership of the NNRT instance is left to the caller of the + // NnrtDelegate constructor; the caller must ensure that the lifetime + // of the NNRT instance exceeds the lifetime of the NnrtDelegate. + explicit NnrtDelegate(const NnrtApi* nnrt); + + // The constructor that accepts options from user. + // This makes a copy of any data that it needs from Options, so + // the caller can safely deallocate any storage pointed to by + // the 'const char *' members of Options immediately after calling this. + explicit NnrtDelegate(const Options& options); + + // Constructor that accepts both an NNRT instance and options. + // The ownership of the NNRT instance is left to the caller of the + // NnrtDelegate constructor; the caller must ensure that the lifetime + // of the NNRT instance exceeds the lifetime of the NnrtDelegate. + // This constructor makes a copy of any data that it needs from Options, so + // the caller can safely deallocate any storage pointed to by + // the 'const char *' members of Options immediately after calling this. + NnrtDelegate(const NnrtApi* nnrt, const Options& options); + + ~NnrtDelegate() = default; + + // Returns the delegate options. 
+ // The lifetime of the storage pointed to by the 'const char *' members of the + // returned Options object is the same as the lifetime of the supplied + // TfLiteDelegate instance. + static TfLiteStatus GetOptions(const TfLiteDelegate* pDelegate, Options& options); + +private: + struct Data { + const NnrtApi* nnrt = nullptr; + + // Preferred Power/perf trade-off. + OH_NN_PerformanceMode executionPerformance = OH_NN_PERFORMANCE_NONE; + + // Selected NNRT accelerator name. + std::string acceleratorName; + + // The cache dir for NNRT model. + std::string cacheDir; + + // The unique token string for NNRT model. + std::string modelToken; + + // Maximum number of NNRT partition to delegate. Zero or negative means + // no limit. + int32_t maxNumberDelegatedPartitions = -1; + + // Specifies the relative priority for executions of the model. + OH_NN_Priority executionPriority = OH_NN_PRIORITY_MEDIUM; + + // Specifies the maximum expected duration in nanosecond for compiling the + // model. + uint64_t maxCompilationTimeoutDurationNs = 0; + + // Specifies the maximum expected duration in nanosecond for executing the + // model. + uint64_t maxExecutionTimeoutDurationNs = 0; + + // Specifies the maximum expected duration in nanosecond for WHILE loops in + // the execution + uint64_t maxExecutionLoopTimeoutDurationNs = 0; + + // allow fp32 compuation to be run in fp16. + bool enableFp16 = false; + + // Whether to allow dynamic dimension sizes without re-compilation. + bool allowDynamicDimensions = false; + + uint32_t version {0}; + + explicit Data(const NnrtApi* nnrt); + ~Data(); + }; + + static TfLiteStatus DoPrepare(TfLiteContext* context, TfLiteDelegate* delegate); + + static TfLiteStatus DoCopyFromBufferHandle(TfLiteContext* context, + TfLiteDelegate* delegate, TfLiteBufferHandle bufferHandle, TfLiteTensor* tensor); + + static TfLiteStatus DoCopyToBufferHandle(TfLiteContext* context, + TfLiteDelegate* delegate, TfLiteBufferHandle bufferHandle, TfLiteTensor* tensor); + + static void DoFreeBufferHandle(TfLiteContext* context, + TfLiteDelegate* delegate, TfLiteBufferHandle* handle); + + static TfLiteStatus LimitDelegatedPartitions(int32_t maxPartitions, + std::vector partitionParamsArray, std::vector& nodesToDelegate); + + static TfLiteStatus GetSupportedNodes(TfLiteContext* context, + TfLiteDelegate* delegate, std::vector& supportedNodes); + + static void GetDelegateKernelRegistration(TfLiteDelegate* delegate, TfLiteRegistration& nnrtDelegateKernel); + + static TfLiteStatus CheckDeviceValid(TfLiteContext* context, TfLiteDelegate* delegate); + + void NnrtDelegateConstructorImpl(const Options& options); + +private: + // Delegate data presented through TfLiteDelegate::data_. + Data m_delegateData; +}; + +TfLiteDelegate* NnrtDelegateSingleton(); +} // namespace tflite + +#endif // TENSORFLOW_LITE_DELEGATES_NNRT_DELEGATE_H diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_kernel.cpp b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_kernel.cpp new file mode 100644 index 0000000..05933ae --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_kernel.cpp @@ -0,0 +1,420 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnrt_delegate_kernel.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tensorflow/lite/context_util.h" +#include "neural_network_runtime.h" + +namespace tflite { +namespace delegate { +namespace nnrt { +constexpr int32_t SCALAR_RANK = 1; + +#define RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_COMPILE(code, callDesc) \ + do { \ + if ( (code) != OH_NN_SUCCESS) { \ + const auto errorDesc = NnrtErrorDescription((code)); \ + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "NN API returned error %s at line %d while %s.\n", errorDesc.c_str(), \ + __LINE__, (callDesc)); \ + m_nnrt->OH_NNCompilation_Destroy(&m_pNnCompilation); \ + return kTfLiteError; \ + } \ + } while (0) + +bool NnrtDelegateKernel::Validate(const int32_t builtinCode) +{ + if (TFLITE_TYPE_TO_NNRT_TYPE.count(builtinCode) && + TFLITE_TYPE_TO_NNRT_TYPE.at(builtinCode) != OH_NN_UNSUPPORT_OPS) { + return true; + } + + return false; +} + +TfLiteStatus NnrtDelegateKernel::Init(TfLiteContext* context, const TfLiteDelegateParams* params) +{ + TF_LITE_ENSURE_EQ(context, params != nullptr, true); + + if (m_initialised) { + TFLITE_LOG_PROD(TFLITE_LOG_INFO, + "[NNRT-DELEGATE_KERNEL] NnrtDelegateKernel has completed initialization, no need init again."); + return kTfLiteOk; + } + + for (auto nodeIndex : TfLiteIntArrayView(params->nodes_to_replace)) { + m_delegateNodes.emplace_back(nodeIndex); + } + + NnrtDelegate::Options delegateOptions; + TF_LITE_ENSURE_STATUS(NnrtDelegate::GetOptions(params->delegate, delegateOptions)); + TF_LITE_ENSURE_STATUS(tflite::GetTargetDevice(context, params->delegate, m_nnrt, m_nnrtDevice)); + if (m_nnModel == nullptr) { + m_nnModel = m_nnrt->OH_NNModel_Construct(); + if (m_nnModel == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-DELEGATE_KERNEL] Fail to create ONNRT model."); + return kTfLiteError; + } + TF_LITE_ENSURE_STATUS(BuildGraph(context, delegateOptions, params->input_tensors, params->output_tensors)); + } + + m_initialised = true; + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::Prepare(TfLiteContext* context, TfLiteNode* node) +{ + TF_LITE_ENSURE_EQ(context, node != nullptr, true); + + if (!m_initialised) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-DELEGATE_KERNEL] NnrtDelegateKernel Prepare failed, not Init yet."); + return kTfLiteError; + } + + if (m_compiled) { + return kTfLiteOk; // If model has completed compilation, no need compile again. 
+ } + + // Create OH_NNCompilation + m_pNnCompilation = m_nnrt->OH_NNCompilation_Construct(m_nnModel); + if (m_pNnCompilation == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-DELEGATE_KERNEL] Fail to create OH_NNCompilation instance."); + return kTfLiteError; + } + + NnrtDelegate::Options delegateOptions; + TF_LITE_ENSURE_STATUS(NnrtDelegate::GetOptions(node->delegate, delegateOptions)); + + TF_LITE_ENSURE_STATUS(SetNnOptions(context, delegateOptions)); + RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_COMPILE(m_nnrt->OH_NNCompilation_Build(m_pNnCompilation), + "completing NNRT compilation"); + + m_compiled = true; + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::Invoke(TfLiteContext* context, TfLiteNode* node) +{ + if (!m_compiled) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-DELEGATE_KERNEL] NnrtDelegateKernel Invoke failed, not compile yet."); + return kTfLiteError; + } + + // Create OH_NNExecutor_Construct + OH_NNExecutor* pNnExecution {nullptr}; + pNnExecution = m_nnrt->OH_NNExecutor_Construct(m_pNnCompilation); + if (pNnExecution == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-DELEGATE_KERNEL] Fail to create OH_NNExecutor instance."); + return kTfLiteError; + } + + // Set the input tensor buffers. + OH_NN_Tensor inputNnTensor; + TF_LITE_ENSURE_STATUS(SetInputTensors(context, node, pNnExecution, inputNnTensor)); + + // Get the output tensor buffers. + TF_LITE_ENSURE_STATUS(SetOutputTensors(context, node, pNnExecution)); + + // Invoke delegated subgraph. + RETURN_TFLITE_ERROR_IF_NN_ERROR(m_nnrt->OH_NNExecutor_Run(pNnExecution), "running computation"); + + m_nnrt->OH_NNExecutor_Destroy(&pNnExecution); + pNnExecution = nullptr; + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::Map(const int32_t builtinCode, const NnrtOpMappingArgs& mappingArgs, + int32_t& nnOpType) const +{ + if (TFLITE_TYPE_TO_NNRT_TYPE.count(builtinCode) == 0) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-DELEGATE_KERNEL] Not support current TF-Lite Operator, builtCode: %d.", builtinCode); + return kTfLiteError; + } + + TfLiteStatus retValue = mappingArgs.builder->AddOpFuncParams(mappingArgs, builtinCode); + if (retValue != kTfLiteOk) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-DELEGATE_KERNEL] Failed to add params to these operations."); + return retValue; + } + nnOpType = TFLITE_TYPE_TO_NNRT_TYPE.at(builtinCode); + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::BuildGraph(TfLiteContext* context, const NnrtDelegate::Options& delegateOptions, + const TfLiteIntArray* inputTensors, const TfLiteIntArray* outputTensors) +{ + if (context == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-DELEGATE_KERNEL] The context is nullptr when building the graph."); + return kTfLiteError; + } + + TF_LITE_ENSURE_EQ(context, inputTensors != nullptr, true); + TF_LITE_ENSURE_EQ(context, outputTensors != nullptr, true); + + // Build the ops and tensors. + TF_LITE_ENSURE_STATUS(AddOpsAndTensors(context, inputTensors, delegateOptions)); + // Map input and output tensor indices to NN + // Make the TensorFlow Lite inputs and outputs to nn_indices. + OH_NN_UInt32Array inputIndices; + OH_NN_UInt32Array outputIndices; + std::vector inputsData; + for (auto i : TfLiteIntArrayView(inputTensors)) { + // Constant tensors are not NNRT inputs. 
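+        // kTfLiteMmapRo marks constant (weight) tensors; those are passed to NNRT as
+        // value-bearing tensors when the ops are added, so only variable, non-optional
+        // tensors are declared as model inputs here.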
+ if ((i != kTfLiteOptionalTensor) && (context->tensors[i].allocation_type != kTfLiteMmapRo) && + // The delegate might not have mapped this input (this can + // happen if one tensor is split in several ones) + (m_tensorMapping.LiteIndexToNn(i) != INVALID_INDEX)) { + const int32_t inputTensorNnIndex = m_tensorMapping.LiteIndexToNn(i); + inputsData.emplace_back(inputTensorNnIndex); + } + } + + std::vector outputsData; + for (auto i : TfLiteIntArrayView(outputTensors)) { + const int32_t outputTensorNnIndex = m_tensorMapping.LiteIndexToNn(i); + // Unmapped outputs are not added + if (outputTensorNnIndex != INVALID_INDEX) { + outputsData.emplace_back(outputTensorNnIndex); + } + } + + inputIndices.data = inputsData.data(); + outputIndices.data = outputsData.data(); + inputIndices.size = inputsData.size(); + outputIndices.size = outputsData.size(); + // Tell NN to declare inputs/outputs + RETURN_TFLITE_ERROR_IF_NN_ERROR(m_nnrt->OH_NNModel_SpecifyInputsAndOutputs(m_nnModel, &inputIndices, + &outputIndices), "identifying model inputs and outputs"); + + RETURN_TFLITE_ERROR_IF_NN_ERROR(m_nnrt->OH_NNModel_Finish(m_nnModel), "finalizing the model"); + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::AddOpsAndTensors(TfLiteContext* context, const TfLiteIntArray* inputTensors, + const NnrtDelegate::Options& delegateOptions) +{ + // The tensor builder allows creating a single op. It is created outside + // the for loop to avoid reallocating the vectors. + NnrtOpBuilderArgs opBuilderArgs = { + .context = context, + .nnModel = m_nnModel, + .inputTensors = const_cast(inputTensors), + .pTensorMapping = &m_tensorMapping, + .delegateOptions = delegateOptions + }; + NnrtOpBuilder builder(m_nnrt, opBuilderArgs); + + // Clear the input and output lists. + builder.ClearInputOuputLists(); + + // Add other tensors. + TfLiteNode* node = nullptr; + TfLiteRegistration* reg = nullptr; + for (int32_t nodeIndex : m_delegateNodes) { + node = nullptr; + reg = nullptr; + TF_LITE_ENSURE_STATUS( + context->GetNodeAndRegistration(context, nodeIndex, &node, ®)); // Obtain the op and registration. + if ((node == nullptr) || (reg == nullptr)) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-DELEGATE_KERNEL] Get node and registration failed."); + return kTfLiteError; + } + + const bool scalarAsTensor = IsScalarInputSupported(reg->builtin_code); + int32_t inputTensorFlags = 0; + if (scalarAsTensor) { + inputTensorFlags |= NN_TENSOR_FLAG_SCALAR_AS_TENSOR; + } + + // Get op type and tensors, fails if the Validate function failed. + int32_t nnOpType; + NnrtOpMappingArgs opMappingArgs = { context, &builder, node, nodeIndex }; + TF_LITE_ENSURE_STATUS(Map(reg->builtin_code, opMappingArgs, nnOpType)); + + for (int32_t inputPos = 0; inputPos < node->inputs->size; ++inputPos) { + if ((reg->builtin_code == kTfLiteBuiltinFullyConnected) && + (node->inputs->data[inputPos] == kTfLiteOptionalTensor)) { + continue; // skip optional bias and handle it during mapping. + } + const auto inputIndex = node->inputs->data[inputPos]; + TF_LITE_ENSURE_STATUS(builder.AddTensorInput(inputIndex, reg->builtin_code, inputTensorFlags)); + } + // Map outputs to NN API tensor indices. 
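+        // Once the inputs and outputs of this node are registered, FinalizeAddOperation
+        // below folds the accumulated parameter/input/output indices into a single
+        // OH_NNModel_AddOperation call.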
+ int32_t outputTensorFlags = 0; + for (int32_t outputPos = 0; outputPos < node->outputs->size; ++outputPos) { + auto outputIndex = node->outputs->data[outputPos]; + TF_LITE_ENSURE_STATUS(builder.AddTensorOutput(outputIndex, reg->builtin_code, outputTensorFlags)); + } + TF_LITE_ENSURE_STATUS(builder.FinalizeAddOperation(static_cast(nnOpType), nodeIndex)); + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::ConvertTensorTypeToNn(TfLiteContext* context, + const std::pair& indexPair, OH_NN_QuantParam* nnQuantParam, OH_NN_Tensor& nnTensor) +{ + TF_LITE_ENSURE_EQ(context, context->tensors_size > indexPair.first, true); + TfLiteTensor* tensor = &(context->tensors[indexPair.first]); + TF_LITE_ENSURE_EQ(context, tensor != nullptr, true); + + OH_NN_DataType nnType {OH_NN_UNKNOWN}; + TF_LITE_ENSURE_STATUS(m_tensorMapping.ConvertType(context, indexPair.first, 0, nnType)); + + uint32_t tensorRank = static_cast(tensor->dims->size); + int32_t* tensorDims = reinterpret_cast(tensor->dims->data); + if (tensorDims == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-DELEGATE_KERNEL] The tensorDims is nullptr when converting the type of tensors to nnrt."); + return kTfLiteError; + } + + // treat scalar input as single cell tensor in NNRT. + if (tensorRank == 0) { + tensorRank = SCALAR_RANK; + *tensorDims = SCALAR_RANK; + } + + nnTensor.dataType = nnType; + nnTensor.dimensionCount = tensorRank; + nnTensor.dimensions = tensorDims; + nnTensor.quantParam = nnQuantParam; + nnTensor.type = OH_NN_TENSOR; + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::SetInputTensors(TfLiteContext* context, TfLiteNode* node, + OH_NNExecutor* pNnExecution, OH_NN_Tensor& nnTensor) +{ + TF_LITE_ENSURE_EQ(context, node != nullptr, true); + TF_LITE_ENSURE_EQ(context, pNnExecution != nullptr, true); + + // Note: we access tflite tensors using + // absolute indices but NN api indices inputs by relative indices. + int32_t relativeIndex = 0; + OH_NN_QuantParam* nnQuantParam = nullptr; + TfLiteIntArray* tensors = node->inputs; + TF_LITE_ENSURE_EQ(context, tensors != nullptr, true); + + for (auto absoluteIndex : TfLiteIntArrayView(tensors)) { + if (absoluteIndex == kTfLiteOptionalTensor) { + continue; + } + + std::pair indexPair = std::make_pair(absoluteIndex, relativeIndex); + ConvertTensorTypeToNn(context, indexPair, nnQuantParam, nnTensor); + + TfLiteTensor* tensor = &context->tensors[absoluteIndex]; + TF_LITE_ENSURE_EQ(context, tensor != nullptr, true); + + if (tensor->allocation_type != kTfLiteMmapRo) { + RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(m_nnrt->OH_NNExecutor_SetInput(pNnExecution, relativeIndex, + &nnTensor, tensor->data.raw, tensor->bytes), + "associating NNRT execution output with a memory object", tensor); + ++relativeIndex; + } else { + continue; + } + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::SetOutputTensors(TfLiteContext* context, TfLiteNode* node, + OH_NNExecutor* pNnExecution) +{ + TF_LITE_ENSURE_EQ(context, node != nullptr, true); + TF_LITE_ENSURE_EQ(context, pNnExecution != nullptr, true); + + // Note: we access tflite tensors using + // absolute indices but NN api indices inputs by relative indices. 
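+    // For example, the TFLite tensor at absolute index 7 may be bound to NNRT output
+    // index 0; relativeIndex only advances for tensors actually mapped to NNRT.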
+ int32_t relativeIndex = 0; + TfLiteIntArray* tensors = node->outputs; + TF_LITE_ENSURE_EQ(context, tensors != nullptr, true); + for (auto absoluteIndex : TfLiteIntArrayView(tensors)) { + if (m_tensorMapping.LiteIndexToNn(absoluteIndex) == INVALID_INDEX) { + continue; + } + + TfLiteTensor* tensor = &context->tensors[absoluteIndex]; + TF_LITE_ENSURE_EQ(context, tensor != nullptr, true); + RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR( + m_nnrt->OH_NNExecutor_SetOutput(pNnExecution, relativeIndex, tensor->data.raw, tensor->bytes), + "associating NNRT execution output to a memory object", tensor); + ++relativeIndex; + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtDelegateKernel::SetNnOptions(TfLiteContext* context, const NnrtDelegate::Options& delegateOptions) +{ + if (context == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-DELEGATE_KERNEL] The context is nullptr when setting nnrt options."); + return kTfLiteError; + } + + RETURN_TFLITE_ERROR_IF_NN_ERROR(m_nnrt->OH_NNCompilation_SetDevice(m_pNnCompilation, m_nnrtDevice), + "creating NNRT compilation"); + + auto performance = delegateOptions.executionPerformance; + if (performance != OH_NN_PERFORMANCE_NONE) { + RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_COMPILE( + m_nnrt->OH_NNCompilation_SetPerformanceMode(m_pNnCompilation, performance), + "setting compilation performance"); + } + + // Set cacahe, if cacheDir & modelToken & device is valid. + std::string cacheDir = delegateOptions.cacheDir; + std::string modelToken = delegateOptions.modelToken; + uint32_t version = delegateOptions.version; + if (!cacheDir.empty() && (!IsUseTargetDevice(delegateOptions) || + (delegateOptions.acceleratorName == NNRT_REFERENCE_DEVICE))) { + RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_COMPILE( + m_nnrt->OH_NNCompilation_SetCache(m_pNnCompilation, cacheDir.c_str(), version), + "setting compilation cache"); + } else if (cacheDir.empty()) { + TFLITE_LOG_PROD(TFLITE_LOG_WARNING, "The cacheDir is empty, will not load or save cache."); + } + return kTfLiteOk; +} +} // namespace nnrt +} // namespace delegate +} // tflite \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_kernel.h b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_kernel.h new file mode 100644 index 0000000..997dc7f --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_kernel.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_LITE_DELEGATES_NNRT_DELEGATE_KERNEL_H +#define TENSORFLOW_LITE_DELEGATES_NNRT_DELEGATE_KERNEL_H + + +#include "neural_network_runtime.h" +#include "tensorflow/lite/c/common.h" + +#include "tensor_mapping.h" +#include "nnrt_op_builder.h" + +namespace tflite { +namespace delegate { +namespace nnrt { + +// Represents a subgraph in TFLite that will be delegated to NNRt. 
+// It is abstracted as a single kernel node in the main TFLite graph and +// implements Init/Prepare/Invoke as TFLite kernel nodes. +class NnrtDelegateKernel { +public: + explicit NnrtDelegateKernel(const NnrtApi* nnrt) + : m_initialised(false), + m_compiled(false), + m_nnrt(nnrt), + m_nnModel(nullptr), + m_pNnCompilation(nullptr) {} + + NnrtDelegateKernel() : NnrtDelegateKernel(NnrtImplementation()) {} + virtual ~NnrtDelegateKernel() + { + m_nnrt->OH_NNModel_Destroy(&m_nnModel); + m_nnrt->OH_NNCompilation_Destroy(&m_pNnCompilation); + m_nnrt = nullptr; + } + + // Returns true if the node can be accelerated with NNRT. + static bool Validate(const int32_t builtinCode); + + // Initialize the kernel (a NN model) and builds the NN Model. + TfLiteStatus Init(TfLiteContext* context, const TfLiteDelegateParams* params); + + // Creates the NNRT Compilation for the NN model. It assumes that Init has + // been called and completed successfully. + TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node); + + // Invoke the NN Model. Expects Init and Prepare to have been completed successfully. + TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node); + +private: + TfLiteStatus Map(int32_t builtinCode, const NnrtOpMappingArgs& mappingArgs, int32_t& nnOpType) const; + TfLiteStatus AddOpsAndTensors(TfLiteContext* context, const TfLiteIntArray* inputTensors, + const NnrtDelegate::Options& delegateOptions); + TfLiteStatus BuildGraph(TfLiteContext* context, const NnrtDelegate::Options& options, + const TfLiteIntArray* inputTensors, const TfLiteIntArray* outputTensors); + TfLiteStatus ConvertTensorTypeToNn(TfLiteContext* context, const std::pair& indexPair, + OH_NN_QuantParam* nnQuantParam, OH_NN_Tensor& nnTensor); + TfLiteStatus SetInputTensors(TfLiteContext* context, TfLiteNode* node, OH_NNExecutor* pNnExecution, + OH_NN_Tensor& nnTensor); + TfLiteStatus SetOutputTensors(TfLiteContext* context, TfLiteNode* node, OH_NNExecutor* pNnExecution); + TfLiteStatus SetNnOptions(TfLiteContext* context, const NnrtDelegate::Options& delegateOptions); + +private: + // True if initialization has been completed successfully + bool m_initialised; + + // True if compilation has been completed successfully + bool m_compiled; + + // NN device handle. + size_t m_nnrtDevice; + + // Access to NNRT. + const NnrtApi* m_nnrt; + + // NN API state. + OH_NNModel* m_nnModel; + OH_NNCompilation* m_pNnCompilation; + + // Node indices that this delegate is responsible for. Indices here + // indexes into the nodes array in the TfLiteContext. + std::vector m_delegateNodes; + + // Track indices we use + TensorMapping m_tensorMapping; +}; +} // namespace nnrt +} // namespace delegate +} // namespace tflite + +#endif // TENSORFLOW_LITE_DELEGATES_NNRT_DELEGATE_KERNEL_H diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_provider.cpp b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_provider.cpp new file mode 100644 index 0000000..e7592a3 --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_delegate_provider.cpp @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "tensorflow/lite/tools/delegates/delegate_provider.h" + +#include "nnrt_delegate.h" +#include "../nnrt/nnrt_implementation.h" + +namespace tflite { +namespace tools { +constexpr int32_t DEFAULT_THREADS = 1; +constexpr int32_t DEFAULT_DELEGATE_NUM = -1; +class NnrtDelegateProvider : public DelegateProvider { +public: + NnrtDelegateProvider() + { + default_params_.AddParam("use_nnrt", ToolParam::Create(false)); + default_params_.AddParam("performance", ToolParam::Create("")); + default_params_.AddParam("priority", ToolParam::Create("")); + default_params_.AddParam("device", ToolParam::Create("")); + default_params_.AddParam("cache_dir", ToolParam::Create("")); + default_params_.AddParam("model_token", ToolParam::Create("")); + default_params_.AddParam("max_delegate_num", ToolParam::Create(DEFAULT_DELEGATE_NUM)); + default_params_.AddParam("enable_fp16", ToolParam::Create(false)); + default_params_.AddParam("allow_dynamic_dimensions", ToolParam::Create(false)); + } + + ~NnrtDelegateProvider() {}; + + std::vector CreateFlags(ToolParams* param) const final; + + void LogParams(const ToolParams& params, bool verbose) const final; + + TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final; + + std::pair CreateRankedTfLiteDelegate(const ToolParams& params) const final; + + std::string GetName() const final + { + return "NNRT"; + } +}; + +REGISTER_DELEGATE_PROVIDER(NnrtDelegateProvider); + +std::vector NnrtDelegateProvider::CreateFlags(ToolParams* params) const +{ + std::vector flags = { + CreateFlag("max_delegate_num", params, "Delegate max num limit, max_delegate_num <= 0 means no limit"), + CreateFlag("enable_fp16", params, "Whether to Infer model with FP16."), + CreateFlag("allow_dynamic_dimensions", params, + "Whether to allow dynamic dimension sizes without re-compilation."), + CreateFlag("performance", params, + "Execution performance for nnrt delegate. " + "choose within [low, medium, high, extreme, default]."), + CreateFlag("priority", params, + "The model execution priority in nnrt, and it " + "choose within [default, low, medium, high]."), + CreateFlag("device", params, + "The name of the nnrt accelerator to use, " + "choose within [cpu, gpu, apu, nnrt-reference], " + "nnrt-reference means chosen automatically by nnrt."), + CreateFlag("cache_dir", params, "The directory of load and save cache for delegate"), + CreateFlag("model_token", params, "The file_name of load and save cache for delegate"), + }; + return flags; +} + +void NnrtDelegateProvider::LogParams(const ToolParams& params, bool verbose) const +{ + LOG_TOOL_PARAM(params, bool, "use_nnrt", "Use NNRT", verbose); + if (!params.Get("use_nnrt")) { + return; // no use nnrt, return. 
+ } + + LOG_TOOL_PARAM(params, std::string, "performance", "NNRT execution performance", verbose); + LOG_TOOL_PARAM(params, std::string, "priority", "NNRT execution priority", verbose); + LOG_TOOL_PARAM(params, std::string, "device", "NNRT accelerator name", verbose); + LOG_TOOL_PARAM(params, std::string, "cache_dir", "NNRT model cache directory", verbose); + LOG_TOOL_PARAM(params, std::string, "model_token", "NNRT model cache filename", verbose); + LOG_TOOL_PARAM(params, int32_t, "max_delegate_num", "NNRT delegate max partition", verbose); + LOG_TOOL_PARAM(params, bool, "enable_fp16", "NNRT allow fp16 inference", verbose); + LOG_TOOL_PARAM(params, bool, "allow_dynamic_dimensions", "NNRT allow dynamic dimensions", verbose); +} + +TfLiteStatus GetExecutionPerformance(const ToolParams& params, NnrtDelegate::Options& options) +{ + std::string stringExecutionPerformance = params.Get("performance"); + if (stringExecutionPerformance.empty()) { + return kTfLiteOk; // no set performance + } + + OH_NN_PerformanceMode executionPerformance = OH_NN_PERFORMANCE_NONE; + if (stringExecutionPerformance == "low") { + executionPerformance = OH_NN_PERFORMANCE_LOW; + } else if (stringExecutionPerformance == "medium") { + executionPerformance = OH_NN_PERFORMANCE_MEDIUM; + } else if (stringExecutionPerformance == "high") { + executionPerformance = OH_NN_PERFORMANCE_HIGH; + } else if (stringExecutionPerformance == "extreme") { + executionPerformance = OH_NN_PERFORMANCE_EXTREME; + } else if (stringExecutionPerformance == "default") { + executionPerformance = OH_NN_PERFORMANCE_NONE; + } else { + TFLITE_LOG(ERROR) << "The provided value is not a valid nnrt execution performance."; + return kTfLiteError; + } + options.executionPerformance = executionPerformance; + + return kTfLiteOk; +} + +TfLiteStatus GetExecutionPriority(const ToolParams& params, NnrtDelegate::Options& options) +{ + std::string stringExecutionPriority = params.Get("priority"); + if (stringExecutionPriority.empty()) { + return kTfLiteOk; // no set priority + } + + OH_NN_Priority executionPriority = OH_NN_PRIORITY_MEDIUM; + if (stringExecutionPriority == "low") { + executionPriority = OH_NN_PRIORITY_LOW; + } else if (stringExecutionPriority == "medium") { + executionPriority = OH_NN_PRIORITY_MEDIUM; + } else if (stringExecutionPriority == "high") { + executionPriority = OH_NN_PRIORITY_HIGH; + } else if (stringExecutionPriority == "default") { + executionPriority = OH_NN_PRIORITY_MEDIUM; + } else { + TFLITE_LOG(ERROR) << "The provided value is not a valid nnrt execution priority."; + return kTfLiteError; + } + options.executionPriority = executionPriority; + + return kTfLiteOk; +} + +TfLiteStatus MapParams(const ToolParams& params, NnrtDelegate::Options& options) +{ + std::string acceleratorName = params.Get("device"); + if (!acceleratorName.empty()) { + options.acceleratorName = acceleratorName; + } + + if (params.GetParam("max_delegate_num") != nullptr) { + options.maxNumberDelegatedPartitions = params.Get("max_delegate_num"); + } + + std::string cacheDir = params.Get("cache_dir"); + if (!cacheDir.empty()) { + options.cacheDir = cacheDir; + } + + std::string modelToken = params.Get("model_token"); + if (!modelToken.empty()) { + options.modelToken = modelToken; + } + + if (params.Get("enable_fp16")) { + options.enableFp16 = true; + } + + if (params.Get("allow_dynamic_dimensions")) { + options.allowDynamicDimensions = true; + } + + return kTfLiteOk; +} + +TfLiteDelegatePtr NnrtDelegateProvider::CreateTfLiteDelegate(const ToolParams& params) const +{ 
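+    // Returns an empty TfLiteDelegatePtr (with a no-op deleter) when use_nnrt is false or
+    // the NNRT library is unavailable; otherwise the tool params are mapped onto
+    // NnrtDelegate::Options and a heap-allocated NnrtDelegate is handed back.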
+ TfLiteDelegatePtr delegate(nullptr, [](TfLiteDelegate*) {}); + if (!params.Get("use_nnrt")) { + return delegate; + } + + NnrtDelegate::Options options; + TFLITE_TOOLS_CHECK(MapParams(params, options) == kTfLiteOk) << "Map params to NNRT Delegate options failed."; + TFLITE_TOOLS_CHECK(GetExecutionPerformance(params, options) == kTfLiteOk) << + "Create TfLite NNRT Delegate failed."; + TFLITE_TOOLS_CHECK(GetExecutionPriority(params, options) == kTfLiteOk) << "Create TfLite NNRT Delegate failed."; + + const auto* nnrtImpl = NnrtImplementation(); + if (!nnrtImpl->nnrtExists) { + TFLITE_LOG(WARN) << "NNRT acceleration is unsupported on this platform."; + return delegate; + } + + return TfLiteDelegatePtr(new (std::nothrow) NnrtDelegate(nnrtImpl, options), + [](TfLiteDelegate* delegate) { delete reinterpret_cast(delegate); }); +} + +std::pair NnrtDelegateProvider::CreateRankedTfLiteDelegate(const ToolParams& params) const +{ + auto ptr = CreateTfLiteDelegate(params); + LogParams(params, false); + return std::make_pair(std::move(ptr), params.GetPosition("use_nnrt")); +} +} // namespace tools +} // namespace tflite \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_op_builder.cpp b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_op_builder.cpp new file mode 100644 index 0000000..b4bdb93 --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_op_builder.cpp @@ -0,0 +1,503 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#define __STDC_WANT_LIB_EXT1__ 1 + +#include "nnrt_op_builder.h" + +#include + +#include "neural_network_runtime.h" +#include "tensorflow/lite/util.h" +#include "tensorflow/lite/context_util.h" + +#include "nnrt_utils.h" + +namespace tflite { +namespace delegate { +namespace nnrt { +constexpr int32_t SCALAR_TENSOR_RANK = 1; +constexpr int32_t ADDZEROS_BIAS_INDEX = -1; +constexpr int32_t UNSPECIFIED_DIMENSION_VALUE = -1; +const std::vector DEPTHWISE_TRANSPOSE_AXISS = { 3, 1, 2, 0 }; + +NnrtOpBuilder::NnrtOpBuilder(const NnrtApi* nnrt, NnrtOpBuilderArgs& opBuilderArgs) + : m_nnrt(nnrt), + m_context(opBuilderArgs.context), + m_pTensorMapping(opBuilderArgs.pTensorMapping), + m_nnModel(opBuilderArgs.nnModel), + m_allowDynamicDimensions(opBuilderArgs.delegateOptions.allowDynamicDimensions) +{ + // Map Op func pointer + MapBuiltinCodeToFunc(); + + // Get model inputs + for (int32_t i : TfLiteIntArrayView(opBuilderArgs.inputTensors)) { + // Constant tensors are not NNRT inputs. 
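+        // m_inputs therefore records only the graph's variable input tensors;
+        // ConstructNNTensor consults it again when dynamic dimensions are allowed.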
+ if (i != kTfLiteOptionalTensor && opBuilderArgs.context->tensors[i].allocation_type != kTfLiteMmapRo) { + m_inputs.emplace_back(i); + } + } +} + +TfLiteStatus NnrtOpBuilder::AddZerosBias(const NnrtOpMappingArgs& mappingArgs, int32_t inputId, int32_t filterId, + int32_t channelNum) +{ + int32_t biasIndex = ADDZEROS_BIAS_INDEX; + mappingArgs.context->AddTensors(mappingArgs.context, 1, &biasIndex); + TfLiteTensor* biasTensor = &mappingArgs.context->tensors[biasIndex]; + const auto inputType = mappingArgs.context->tensors[inputId].type; + + if (inputType == kTfLiteFloat32) { + biasTensor->type = kTfLiteFloat32; + } else { + biasTensor->type = kTfLiteInt32; + } + + // Create an array with a required bias shape and resize the bias tensor. + TfLiteIntArray* biasShape = TfLiteIntArrayCreate(1); // 1-dimension + biasShape->data[0] = channelNum; + biasTensor->allocation_type = kTfLiteDynamic; + mappingArgs.context->ResizeTensor(mappingArgs.context, biasTensor, biasShape); + + // Set tensor's values to zeroes and add it using AddVector*, so that the values are copied to NNRT. +#ifdef __STDC_LIB_EXT1__ + if (inputType == kTfLiteFloat32) { + memset_s(biasTensor->data.f, biasTensor->bytes, 0, channelNum * sizeof(float)); + TF_LITE_ENSURE_STATUS(mappingArgs.builder->AddVectorFloat32Tensor(biasTensor->data.f, channelNum, + OH_NN_TENSOR)); + } else { + memset_s(biasTensor->data.i32, biasTensor->bytes, 0, channelNum * sizeof(int32_t)); + const TfLiteTensor& inputTensor = mappingArgs.context->tensors[inputId]; + const TfLiteTensor& filterTensor = mappingArgs.context->tensors[filterId]; + + // NNRT requires bias scale to be a product of an input scale and a filter scale. + biasTensor->params.scale = inputTensor.params.scale * filterTensor.params.scale; + TF_LITE_ENSURE_STATUS(mappingArgs.builder->AddVectorInt32Tensor(biasTensor->data.i32, channelNum, + OH_NN_TENSOR)); + } +#endif + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddBasicComputeParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + if (builtinCode == kTfLiteBuiltinAdd) { + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + TF_LITE_ENSURE_STATUS(AddActivateParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_ADD_ACTIVATIONTYPE)); + } else if (builtinCode == kTfLiteBuiltinMul) { + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + TF_LITE_ENSURE_STATUS(AddActivateParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_MUL_ACTIVATION_TYPE)); + } else if (builtinCode == kTfLiteBuiltinSub) { + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + TF_LITE_ENSURE_STATUS(AddActivateParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_SUB_ACTIVATIONTYPE)); + } else { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-OPBUILDER] unsupportted basic compute type %d.", builtinCode); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddAvgPoolingParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + std::vector kernel = { static_cast(builtin->filter_height), + static_cast(builtin->filter_width) }; + std::vector stride = { static_cast(builtin->stride_height), + static_cast(builtin->stride_width) }; + + mappingArgs.builder->AddVectorInt64Tensor(kernel.data(), kernel.size(), OH_NN_AVG_POOL_KERNEL_SIZE); + mappingArgs.builder->AddVectorInt64Tensor(stride.data(), stride.size(), OH_NN_AVG_POOL_STRIDE); + TF_LITE_ENSURE_STATUS(AddPadParamsInOperator(mappingArgs, builtin, 
builtinCode, + OH_NN_AVG_POOL_PAD_MODE)); + TF_LITE_ENSURE_STATUS(AddActivateParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_AVG_POOL_ACTIVATION_TYPE)); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddMaxPoolingParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + std::vector kernel = { static_cast(builtin->filter_height), + static_cast(builtin->filter_width) }; + std::vector stride = { static_cast(builtin->stride_height), + static_cast(builtin->stride_width) }; + + mappingArgs.builder->AddVectorInt64Tensor(kernel.data(), kernel.size(), OH_NN_MAX_POOL_KERNEL_SIZE); + mappingArgs.builder->AddVectorInt64Tensor(stride.data(), stride.size(), OH_NN_MAX_POOL_STRIDE); + TF_LITE_ENSURE_STATUS(AddPadParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_MAX_POOL_PAD_MODE)); + TF_LITE_ENSURE_STATUS(AddActivateParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_MAX_POOL_ACTIVATION_TYPE)); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddFullConnectedParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + // IF bias is not presented, bias input index will be -1. + const bool isBiasPresent = + (mappingArgs.node->inputs->size == 3) && (mappingArgs.node->inputs->data[2] != kTfLiteOptionalTensor); + + if (!isBiasPresent) { + const int32_t inputTensorId = mappingArgs.node->inputs->data[0]; // kInputTensor + const int32_t filterTensorId = mappingArgs.node->inputs->data[1]; // kWeightsTensor + const int32_t numUnits = mappingArgs.context->tensors[filterTensorId].dims->data[0]; // bias channel num + TF_LITE_ENSURE_STATUS(AddZerosBias(mappingArgs, inputTensorId, filterTensorId, numUnits)); + } + + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + TF_LITE_ENSURE_STATUS(AddActivateParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_FULL_CONNECTION_ACTIVATIONTYPE)); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddConcatenationParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + const int64_t axis = static_cast(builtin->axis); + mappingArgs.builder->AddScalarInt64Tensor(axis, OH_NN_CONCAT_AXIS); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddSoftmaxParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + const int64_t axis = static_cast(builtin->beta); + mappingArgs.builder->AddScalarInt64Tensor(axis, OH_NN_SOFTMAX_AXIS); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddQuantizeParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + OH_NN_DataType nnType {OH_NN_FLOAT32}; + + int32_t inputIndex = mappingArgs.node->inputs->data[0]; + m_pTensorMapping->ConvertType(m_context, inputIndex, 0, nnType); + mappingArgs.builder->AddScalarInt64Tensor(static_cast(nnType), OH_NN_QUANT_DTYPE_CAST_SRC_T); + + int32_t outputIndex = mappingArgs.node->outputs->data[0]; + m_pTensorMapping->ConvertType(m_context, outputIndex, 0, nnType); + mappingArgs.builder->AddScalarInt64Tensor(static_cast(nnType), OH_NN_QUANT_DTYPE_CAST_DST_T); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddPackParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + const int64_t axis = static_cast(builtin->axis); + mappingArgs.builder->AddScalarInt64Tensor(axis, OH_NN_STACK_AXIS); + + return 
kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddPadParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + float padValue = 0.0; + mappingArgs.builder->AddScalarFloat32Tensor(padValue, OH_NN_PAD_CONSTANT_VALUE); + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddReduceMeanParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + const int32_t keepDims = (builtin->keep_dims); + mappingArgs.builder->AddScalarBoolTensor(keepDims, OH_NN_REDUCE_MEAN_KEEP_DIMS); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddStridedSliceParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + mappingArgs.builder->AddScalarInt64Tensor(static_cast(builtin->begin_mask), + OH_NN_STRIDED_SLICE_BEGIN_MASK); + mappingArgs.builder->AddScalarInt64Tensor(static_cast(builtin->end_mask), + OH_NN_STRIDED_SLICE_END_MASK); + mappingArgs.builder->AddScalarInt64Tensor(static_cast(builtin->ellipsis_mask), + OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + mappingArgs.builder->AddScalarInt64Tensor(static_cast(builtin->new_axis_mask), + OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + mappingArgs.builder->AddScalarInt64Tensor(static_cast(builtin->shrink_axis_mask), + OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddReshapeParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + if (mappingArgs.node->inputs->size == 1) { + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + int32_t numDimensions = builtin->num_dimensions; + std::vector outputShape(numDimensions); + for (int32_t i = 0; i < numDimensions; ++i) { + outputShape[i] = builtin->shape[i]; + } + mappingArgs.builder->AddVectorInt32Tensor(outputShape.data(), outputShape.size(), OH_NN_TENSOR); + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddConv2DParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + std::vector stride = { static_cast(builtin->stride_height), + static_cast(builtin->stride_width) }; + std::vector dilation = { static_cast(builtin->dilation_height_factor), + static_cast(builtin->dilation_width_factor) }; + int64_t groupNum = 1; + + mappingArgs.builder->AddVectorInt64Tensor(stride.data(), stride.size(), OH_NN_CONV2D_STRIDES); + mappingArgs.builder->AddVectorInt64Tensor(dilation.data(), dilation.size(), OH_NN_CONV2D_DILATION); + + TF_LITE_ENSURE_STATUS(AddPadParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_CONV2D_PAD_MODE)); + mappingArgs.builder->AddScalarInt64Tensor(groupNum, OH_NN_CONV2D_GROUP); + TF_LITE_ENSURE_STATUS(AddActivateParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_CONV2D_ACTIVATION_TYPE)); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddDepthwiseConv2DParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + auto builtin = reinterpret_cast(mappingArgs.node->builtin_data); + std::vector stride = { static_cast(builtin->stride_height), + static_cast(builtin->stride_width) }; + std::vector dilation = { static_cast(builtin->dilation_height_factor), + static_cast(builtin->dilation_width_factor) }; + TF_LITE_ENSURE_STATUS(mappingArgs.builder->AddVectorInt64Tensor(stride.data(), stride.size(), + OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES)); + TF_LITE_ENSURE_STATUS(mappingArgs.builder->AddVectorInt64Tensor(dilation.data(), dilation.size(), + 
OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION)); + TF_LITE_ENSURE_STATUS(AddPadParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD)); + TF_LITE_ENSURE_STATUS(AddActivateParamsInOperator(mappingArgs, builtin, builtinCode, + OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE)); + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::FinalizeAddOperation(OH_NN_OperationType type, int32_t liteNodeIndex) +{ + // Actually add a NN API Operation + OH_NN_UInt32Array inputIndices; + OH_NN_UInt32Array outputIndices; + OH_NN_UInt32Array paramIndices; + inputIndices.data = m_augmentedInputs.data(); + inputIndices.size = static_cast(m_augmentedInputs.size()); + outputIndices.data = m_augmentedOutputs.data(); + outputIndices.size = static_cast(m_augmentedOutputs.size()); + paramIndices.size = static_cast(m_augmentedParams.size()); + + paramIndices.data = (m_augmentedParams.size() == 0) ? nullptr : m_augmentedParams.data(); + + RETURN_TFLITE_ERROR_IF_NN_ERROR(m_nnrt->OH_NNModel_AddOperation(m_nnModel, + type, ¶mIndices, &inputIndices, &outputIndices), "adding operation"); + + m_augmentedInputs.clear(); + m_augmentedOutputs.clear(); + m_augmentedParams.clear(); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddTensor(int32_t tensorIndex, int32_t builtinCode, std::vector& indices, + int32_t tensorFlags) +{ + int32_t nnTensorIndex = m_pTensorMapping->LiteIndexToNn(tensorIndex); + if (nnTensorIndex != INVALID_INDEX) { + indices.emplace_back(nnTensorIndex); + return kTfLiteOk; + } + + // Parameters needed for new type. + TfLiteTensor* tensor = &(m_context->tensors[tensorIndex]); + if (kTfLiteNoType == tensor->type) { + indices.emplace_back(INVALID_INDEX); + return kTfLiteOk; + } + + TF_LITE_ENSURE_STATUS(AddTensor(tensorIndex, builtinCode, tensorFlags, nnTensorIndex)); + + indices.emplace_back(nnTensorIndex); + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddTensor(int32_t tensorIndex, int32_t builtinCode, int32_t tensorFlags, + int32_t& nnTensorIndex) +{ + TfLiteTensor* tensor = &(m_context->tensors[tensorIndex]); + const bool scalarAsTensor = tensorFlags & NN_TENSOR_FLAG_SCALAR_AS_TENSOR; + OH_NN_Tensor nnTensor; + OH_NN_QuantParam nnQuantParam; + std::vector weightDims; + void* tensorData = tensor->data.data; + std::vector depthwiseTensorData; + TF_LITE_ENSURE_STATUS(ConstructNNTensor(tensorIndex, builtinCode, scalarAsTensor, nnQuantParam, nnTensor)); + + // For depth-wise conv operator, we should transpose weight tensor to adapt NN tensor format. 
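+    // Illustrative example (shapes assumed, not taken from a specific model): a TFLite depthwise
+    // weight of shape [1, 3, 3, 32] (NHWC) is reordered along axes [3, 1, 2, 0] into [32, 3, 3, 1]
+    // (CHWN) by TransposeDepthwiseTensor() below before it is handed to NNRT.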
+ if ((builtinCode == kTfLiteBuiltinDepthwiseConv2d) && (tensor->allocation_type == kTfLiteMmapRo) && + (nnTensor.dimensionCount == DEPTHWISE_WEIGHT_DIMENSION_COUNT)) { + size_t typeBytes = 0; + int64_t tensorSize = 0; + TF_LITE_ENSURE_STATUS(GetSizeOfType(m_context, tensor->type, &typeBytes)); + TF_LITE_ENSURE_STATUS(GetTensorSize(m_context, nnTensor.dimensions, nnTensor.dimensionCount, tensorSize)); + + depthwiseTensorData.assign(tensorSize * typeBytes, 0); + TfLiteStatus retCode = TransposeDepthwiseTensor(tensorIndex, nnTensor, weightDims, depthwiseTensorData); + if (retCode != kTfLiteOk) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-OPBUILDER] Fail to transpose depthwise tensor."); + return kTfLiteError; + } + tensorData = static_cast(depthwiseTensorData.data()); + } + + int32_t nnRet = m_nnrt->OH_NNModel_AddTensor(m_nnModel, &nnTensor); + if (nnRet != OH_NN_SUCCESS) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-OPBUILDER] Fail to add nnTensor to NN model."); + return kTfLiteError; + } + + // Allocate a new tensor index + nnTensorIndex = m_pTensorMapping->AddNewNnTensorIndex(tensorIndex); + if (tensor->allocation_type == kTfLiteMmapRo) { + nnRet = m_nnrt->OH_NNModel_SetTensorData(m_nnModel, nnTensorIndex, + tensorData, tensor->bytes); + if (nnRet != OH_NN_SUCCESS) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-OPBUILDER] Fail to setting new nnTensor value."); + return kTfLiteError; + } + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::TransposeDepthwiseTensor(int32_t tensorIndex, OH_NN_Tensor& nnTensor, + std::vector& weightDims, std::vector& tensorData) +{ + const int32_t* tensorDims = nnTensor.dimensions; + uint32_t tensorRank = nnTensor.dimensionCount; + + // For Depth-wise Convolution, NNRT choose to Transpose dimension with [3, 1, 2, 0] + TF_LITE_ENSURE_STATUS(TransposeDims(m_context, tensorDims, tensorRank, DEPTHWISE_TRANSPOSE_AXISS, weightDims)); + nnTensor.dimensions = weightDims.data(); + + TfLiteTensor* tensor = &(m_context->tensors[tensorIndex]); + if (tensor->type == kTfLiteFloat32) { + TF_LITE_ENSURE_STATUS( + TransposeTensor(m_context, tensorIndex, tensorDims, reinterpret_cast(tensorData.data()))); + } else if (tensor->type == kTfLiteInt32) { + TF_LITE_ENSURE_STATUS( + TransposeTensor(m_context, tensorIndex, tensorDims, reinterpret_cast(tensorData.data()))); + } else if (tensor->type == kTfLiteInt8) { + TF_LITE_ENSURE_STATUS( + TransposeTensor(m_context, tensorIndex, tensorDims, reinterpret_cast(tensorData.data()))); + } else if (tensor->type == kTfLiteUInt8) { + TF_LITE_ENSURE_STATUS( + TransposeTensor(m_context, tensorIndex, tensorDims, reinterpret_cast(tensorData.data()))); + } else { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-OPBUILDER] unsupportted weight tensor type %d.", tensor->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::ConstructNNTensor(int32_t tensorIndex, int32_t builtinCode, int32_t tensorFlags, + OH_NN_QuantParam& nnQuantParam, OH_NN_Tensor& nnTensor) +{ + OH_NN_DataType nnType {OH_NN_UNKNOWN}; + TF_LITE_ENSURE_STATUS(m_pTensorMapping->ConvertType(m_context, tensorIndex, tensorFlags, nnType)); + TF_LITE_ENSURE_STATUS(m_pTensorMapping->ConvertQuantParams(m_context, tensorIndex, nnQuantParam)); + + TfLiteTensor* tensor = &(m_context->tensors[tensorIndex]); + uint32_t tensorRank = static_cast(tensor->dims->size); + m_dimsUnspecified.assign(tensorRank, UNSPECIFIED_DIMENSION_VALUE); + + int32_t* tensorDims = (m_allowDynamicDimensions && (tensor->allocation_type != kTfLiteMmapRo) && + std::find(m_inputs.begin(), 
m_inputs.end(), tensorIndex) != m_inputs.end()) ? + reinterpret_cast(m_dimsUnspecified.data()) : + tensor->dims->data; + + const bool scalarAsTensor = tensorFlags & NN_TENSOR_FLAG_SCALAR_AS_TENSOR; + if (scalarAsTensor && tensorRank == 0) { + tensorRank = SCALAR_TENSOR_RANK; // Use rank 1, shape {1} nnTensor for TFLite scalar tensors. + tensorDims = const_cast(&SCALAR_TENSOR_RANK); + } + + if (tensorRank == 0) { + // if the tensorRank is 0, the dimension ptr must be nullptr. + tensorDims = nullptr; + } + + nnTensor.dataType = nnType; + nnTensor.dimensionCount = tensorRank; + nnTensor.dimensions = tensorDims; + nnTensor.quantParam = nnQuantParam.quantCount ? &nnQuantParam : nullptr; + nnTensor.type = OH_NN_TENSOR; + + return kTfLiteOk; +} + +TfLiteStatus NnrtOpBuilder::AddOpFuncParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) +{ + if (!m_keyToOpFunc.count(builtinCode)) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-OPBUILDER] unsupportted Op builtinCode : %d.", builtinCode); + return kTfLiteError; + } + + OpFuncPtr pfunc = m_keyToOpFunc[builtinCode]; + return (this->*pfunc)(mappingArgs, builtinCode); +} + +TfLiteStatus NnrtOpBuilder::MapBuiltinCodeToFunc() +{ + m_keyToOpFunc[kTfLiteBuiltinAdd] = &NnrtOpBuilder::AddBasicComputeParams; + m_keyToOpFunc[kTfLiteBuiltinAveragePool2d] = &NnrtOpBuilder::AddAvgPoolingParams; + m_keyToOpFunc[kTfLiteBuiltinConcatenation] = &NnrtOpBuilder::AddConcatenationParams; + m_keyToOpFunc[kTfLiteBuiltinConv2d] = &NnrtOpBuilder::AddConv2DParams; + m_keyToOpFunc[kTfLiteBuiltinDepthwiseConv2d] = &NnrtOpBuilder::AddDepthwiseConv2DParams; + m_keyToOpFunc[kTfLiteBuiltinDequantize] = &NnrtOpBuilder::AddQuantizeParams; + m_keyToOpFunc[kTfLiteBuiltinFullyConnected] = &NnrtOpBuilder::AddFullConnectedParams; + m_keyToOpFunc[kTfLiteBuiltinMaxPool2d] = &NnrtOpBuilder::AddMaxPoolingParams; + m_keyToOpFunc[kTfLiteBuiltinMul] = &NnrtOpBuilder::AddBasicComputeParams; + m_keyToOpFunc[kTfLiteBuiltinSub] = &NnrtOpBuilder::AddBasicComputeParams; + m_keyToOpFunc[kTfLiteBuiltinReshape] = &NnrtOpBuilder::AddReshapeParams; + m_keyToOpFunc[kTfLiteBuiltinSoftmax] = &NnrtOpBuilder::AddSoftmaxParams; + m_keyToOpFunc[kTfLiteBuiltinStridedSlice] = &NnrtOpBuilder::AddStridedSliceParams; + m_keyToOpFunc[kTfLiteBuiltinPack] = &NnrtOpBuilder::AddPackParams; + m_keyToOpFunc[kTfLiteBuiltinPad] = &NnrtOpBuilder::AddPadParams; + m_keyToOpFunc[kTfLiteBuiltinMean] = &NnrtOpBuilder::AddReduceMeanParams; + m_keyToOpFunc[kTfLiteBuiltinQuantize] = &NnrtOpBuilder::AddQuantizeParams; + m_keyToOpFunc[kTfLiteBuiltinHardSwish] = &NnrtOpBuilder::AddDefaultOpParams; + m_keyToOpFunc[kTfLiteBuiltinShape] = &NnrtOpBuilder::AddDefaultOpParams; + m_keyToOpFunc[kTfLiteBuiltinLogistic] = &NnrtOpBuilder::AddDefaultOpParams; + + return kTfLiteOk; +} +} // namespace nnrt +} // namespace delegate +} // namespace tflite \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_op_builder.h b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_op_builder.h new file mode 100644 index 0000000..bb4e823 --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_op_builder.h @@ -0,0 +1,325 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_LITE_DELEGATES_NNRT_OP_BUILDER_H +#define TENSORFLOW_LITE_DELEGATES_NNRT_OP_BUILDER_H + +#include + +#include "tensorflow/lite/builtin_ops.h" +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/minimal_logging.h" + +#include "../nnrt/nnrt_implementation.h" +#include "tensor_mapping.h" + +namespace tflite { +namespace delegate { +namespace nnrt { +constexpr int32_t PADDING_SAME = 0; +constexpr int32_t PADDING_VALID = 1; + +// NN API Operator Builder +class NnrtOpBuilder; + +// The kernel that represents the node sub set of TF Lite being run on NN API. +struct NnrtOpMappingArgs { + TfLiteContext* context {nullptr}; + NnrtOpBuilder* builder {nullptr}; + TfLiteNode* node {nullptr}; + int32_t nodeIndex {-1}; +}; + +struct NnrtOpBuilderArgs { + TfLiteContext* context {nullptr}; + OH_NNModel* nnModel {nullptr}; + TfLiteIntArray* inputTensors {nullptr}; + TensorMapping* pTensorMapping {nullptr}; + NnrtDelegate::Options delegateOptions; +}; + +// Abstract builder for building an op in the NN API graph. This handles +// the disparity between TFLite and NN API nnTensor types. NN API has singular +// nnTensors for both tensors and parameters, and TFLite separates the two. +class NnrtOpBuilder { +public: + NnrtOpBuilder(const NnrtApi* nnrt, NnrtOpBuilderArgs& opBuilderArgs); + ~NnrtOpBuilder() = default; + + // Add scalar nnTensor, the datatypes involved are bool, Int32, Int8, Int64, Float32 + TfLiteStatus AddScalarBoolTensor(bool value, OH_NN_TensorType nnTensorType) + { + return AddScalarTensor(value, OH_NN_BOOL, nnTensorType); + } + TfLiteStatus AddScalarInt32Tensor(int32_t value, OH_NN_TensorType nnTensorType) + { + return AddScalarTensor(value, OH_NN_INT32, nnTensorType); + } + TfLiteStatus AddScalarInt8Tensor(int32_t value, OH_NN_TensorType nnTensorType) + { + return AddScalarTensor(value, OH_NN_INT8, nnTensorType); + } + TfLiteStatus AddScalarInt64Tensor(int64_t value, OH_NN_TensorType nnTensorType) + { + return AddScalarTensor(value, OH_NN_INT64, nnTensorType); + } + TfLiteStatus AddScalarFloat32Tensor(float value, OH_NN_TensorType nnTensorType) + { + return AddScalarTensor(value, OH_NN_FLOAT32, nnTensorType); + } + + // Add vector nnTensor, the datatypes involved are Int32, Int64, Int16, Int8, Float32 + TfLiteStatus AddVectorInt32Tensor(const int32_t* values, uint32_t numValues, OH_NN_TensorType nnTensorType) + { + return AddVectorTensor(values, numValues, OH_NN_UINT32, nnTensorType); + } + TfLiteStatus AddVectorInt64Tensor(const int64_t* values, uint32_t numValues, OH_NN_TensorType nnTensorType) + { + return AddVectorTensor(values, numValues, OH_NN_INT64, nnTensorType); + } + TfLiteStatus AddVectorFloat32Tensor(const float* values, uint32_t numValues, OH_NN_TensorType nnTensorType) + { + return AddVectorTensor(values, numValues, OH_NN_FLOAT32, nnTensorType); + } + + // Add input tensor + TfLiteStatus AddTensorInput(int32_t tensorIndex, int32_t builtinCode, int32_t tensorFlags = 0) + { + return AddTensor(tensorIndex, builtinCode, m_augmentedInputs, tensorFlags); + } + // Add 
output tensor + TfLiteStatus AddTensorOutput(int32_t tensorIndex, int32_t builtinCode, int32_t tensorFlags = 0) + { + return AddTensor(tensorIndex, builtinCode, m_augmentedOutputs, tensorFlags); + } + + // Finish emitting the op (of type `type`) into the NN API. + TfLiteStatus FinalizeAddOperation(OH_NN_OperationType type, int32_t liteNodeIndex); + + void ClearInputOuputLists() + { + m_augmentedInputs.clear(); + m_augmentedOutputs.clear(); + m_augmentedParams.clear(); + } + + TfLiteStatus AddOpFuncParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus MapBuiltinCodeToFunc(); + +private: + template + TfLiteStatus AddScalarTensor(T value, OH_NN_DataType nnType, OH_NN_TensorType nnTensorType) + { + OH_NN_Tensor tensor { + .dataType = nnType, + .dimensionCount = 0, + .dimensions = nullptr, + .quantParam = nullptr, + .type = nnTensorType, + }; + + RETURN_TFLITE_ERROR_IF_NN_ERROR( + m_nnrt->OH_NNModel_AddTensor(m_nnModel, &tensor), "adding nnTensor"); + + const int32_t nnIndex = m_pTensorMapping->AddNewNonTensorTensor(); + RETURN_TFLITE_ERROR_IF_NN_ERROR( + m_nnrt->OH_NNModel_SetTensorData(m_nnModel, nnIndex, &value, sizeof(value)), + "setting new nnTensor value"); + m_augmentedParams.emplace_back(nnIndex); + + return kTfLiteOk; + } + + template + TfLiteStatus AddVectorTensor(const T* values, int32_t numValues, OH_NN_DataType nnType, + OH_NN_TensorType nnTensorType) + { + if (values == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-OPBUILDER] The variable of values is nullptr when adding vector to operator."); + return kTfLiteError; + } + uint32_t numBits = 8; + double doubleScale = 0.f; + int32_t zeroPoint = 0; + OH_NN_QuantParam quantParam = { + .quantCount = 1, + .numBits = &numBits, + .scale = &doubleScale, + .zeroPoint = &zeroPoint + }; + + OH_NN_Tensor tensor { + .dataType = nnType, + .dimensionCount = 1, // For 1-dim vector, dimensionCount is one. 
+ .dimensions = &numValues, + .quantParam = &quantParam, + .type = nnTensorType, + }; + + RETURN_TFLITE_ERROR_IF_NN_ERROR( + m_nnrt->OH_NNModel_AddTensor(m_nnModel, &tensor), "adding nnTensor"); + const int32_t nnIndex = m_pTensorMapping->AddNewNonTensorTensor(); + RETURN_TFLITE_ERROR_IF_NN_ERROR( + m_nnrt->OH_NNModel_SetTensorData(m_nnModel, nnIndex, values, sizeof(*(values)) * numValues), + "settings new nnTensor value"); + m_augmentedParams.emplace_back(nnIndex); + + return kTfLiteOk; + } + + template + TfLiteStatus AddActivateParamsInOperator(const NnrtOpMappingArgs& mappingArgs, T* builtinParams, + int32_t builtinCode, OH_NN_TensorType nnTensorType) + { + if (builtinParams == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-OPBUILDER] The builtin params is nullptr when adding activate params to operator."); + return kTfLiteError; + } + + if ((builtinParams->activation >= 0) && + (builtinParams->activation < ACTIVATE_FUSE_TYPE_LIST.size()) && + (ACTIVATE_FUSE_TYPE_LIST[builtinParams->activation] != OH_NN_FUSE_UNSUPPORTED)) { + mappingArgs.builder->AddScalarInt8Tensor(ACTIVATE_FUSE_TYPE_LIST[builtinParams->activation], nnTensorType); + } else { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-OPBUILDER] unsupportted fused activation type %d for OpType %d.", + builtinParams->activation, builtinCode); + return kTfLiteError; + } + + return kTfLiteOk; + } + + template + TfLiteStatus AddPadParamsInOperator(const NnrtOpMappingArgs& mappingArgs, T* builtinParams, int32_t builtinCode, + OH_NN_TensorType nnTensorType) + { + if (builtinParams == nullptr) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-OPBUILDER] The builtin params is nullptr when adding pad params to operator."); + return kTfLiteError; + } + + int32_t padding = 0; + if (builtinParams->padding == kTfLitePaddingUnknown) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-OPBUILDER] unknown padding mode for OpType %d.", builtinCode); + return kTfLiteError; + } else { + padding = (builtinParams->padding == kTfLitePaddingSame) ? PADDING_SAME : PADDING_VALID; + } + mappingArgs.builder->AddScalarInt8Tensor(padding, nnTensorType); + + return kTfLiteOk; + } + + // NNRT requires a bias tensor, so we allocate a new tensor to fill it with zeroes. + // It is deleted with other tensors in the context during subgraph destructor call. 
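+    // For example (illustrative values only): a fully-connected node whose weight tensor is [64, 128]
+    // and whose bias input is absent gets a zero-filled bias of 64 elements appended, the channel
+    // count being read from the weight tensor's first dimension in AddFullConnectedParams().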
+ TfLiteStatus AddZerosBias(const NnrtOpMappingArgs& mappingArgs, int32_t inputId, int32_t filterId, + int32_t channelNum); + + TfLiteStatus AddBasicComputeParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddAvgPoolingParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddMaxPoolingParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddFullConnectedParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddConv2DParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddConcatenationParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddSoftmaxParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddQuantizeParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddPackParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddPadParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddReduceMeanParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddReshapeParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddStridedSliceParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddDepthwiseConv2DParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + + TfLiteStatus AddDefaultOpParams(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode) + { + return kTfLiteOk; + } + + // Adds a new NN API tensor that shadows the TF Lite tensor `tensorIndex`. + // This restores the NN API tensor index corresponding to the created tensor. + // If another caller previously created a NN API tensor for `tensorIndex` + // then the existing one is restored. + TfLiteStatus AddTensor(int32_t tensorIndex, int32_t builtinCode, std::vector& indices, + int32_t tensorFlags = 0); + + // Adds a new NN API nnTensor to NNModel. + // If the builtinCode is kTfLiteBuiltinDepthwiseConv2d, the weight tensor will be transposed to CHWN format. + TfLiteStatus AddTensor(int32_t tensorIndex, int32_t builtinCode, int32_t tensorFlags, int32_t& nnTensorIndex); + + // Transpose dimension for Depth-wise Convolution Operator. + TfLiteStatus TransposeDepthwiseTensor(int32_t tensorIndex, OH_NN_Tensor& nnTensor, std::vector& destDims, + std::vector& tensorData); + + // Get NN nnTensor from tensor + TfLiteStatus ConstructNNTensor(int32_t tensorIndex, int32_t builtinCode, int32_t tensorFlags, + OH_NN_QuantParam& nnQuantParam, OH_NN_Tensor& nnTensor); + +private: + // Access to NNRT. + const NnrtApi* const m_nnrt; + + // TfLiteContext for error handling. + TfLiteContext* const m_context; + + // Indices of all inputs of tflite subgraph. + std::vector m_inputs; + + // Tracks relationship between indices. + TensorMapping* const m_pTensorMapping; + + // The NNRT model. + OH_NNModel* const m_nnModel; + + // Inputs and outputs for the current op. These are augmented in the sense + // that NN API uses nnTensors for all arguments, not just tensors, unlike + // TensorFlow Lite. + std::vector m_augmentedInputs; + std::vector m_augmentedParams; + std::vector m_augmentedOutputs; + + // Whether to allow dynamic batch size without re-compilation. + bool m_allowDynamicDimensions; + + // the dynamic dimension information. 
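+    // (Filled with UNSPECIFIED_DIMENSION_VALUE placeholders in ConstructNNTensor() when dynamic
+    // dimensions are allowed for graph inputs.)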
+ std::vector m_dimsUnspecified; + + // key builtInCode to OpFunc Map + using OpFuncPtr = TfLiteStatus(NnrtOpBuilder::*)(const NnrtOpMappingArgs& mappingArgs, int32_t builtinCode); + std::map m_keyToOpFunc; +}; +} // namespace nnrt +} // namespace delegate +} // namespace tflite + +#endif // TENSORFLOW_LITE_DELEGATES_NNRT_OP_BUILDER_H \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_utils.cpp b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_utils.cpp new file mode 100644 index 0000000..870976b --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_utils.cpp @@ -0,0 +1,347 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnrt_utils.h" + +#include +#include "tensorflow/lite/util.h" +#include "tensorflow/lite/builtin_ops.h" +#include "tensorflow/lite/context_util.h" +#include "tensorflow/lite/minimal_logging.h" +#include "tensorflow/lite/kernels/kernel_util.h" + +#include "neural_network_runtime_type.h" + +namespace tflite { +std::string NnrtErrorDescription(int32_t errorCode) +{ + switch (errorCode) { + case OH_NN_SUCCESS: + return "OH_NN_SUCCESS"; + case OH_NN_FAILED: + return "OH_NN_FAILED"; + case OH_NN_INVALID_PARAMETER: + return "OH_NN_INVALID_PARAMETER"; + case OH_NN_MEMORY_ERROR: + return "OH_NN_MEMORY_ERROR"; + case OH_NN_OPERATION_FORBIDDEN: + return "OH_NN_OPERATION_FORBIDDEN"; + case OH_NN_NULL_PTR: + return "OH_NN_NULL_PTR"; + case OH_NN_INVALID_FILE: + return "OH_NN_INVALID_FILE"; + case OH_NN_UNAVALIDABLE_DEVICE: + return "OH_NN_UNAVALIDABLE_DEVICE"; + case OH_NN_INVALID_PATH: + return "OH_NN_INVALID_PATH"; + default: + return "Unknown NNRT error code: " + std::to_string(errorCode); + } +} + +bool IsFloat(TfLiteType type) +{ + return type == kTfLiteFloat32; +} + +bool IsQuantized(TfLiteType type) +{ + return ((type == kTfLiteUInt8) || (type == kTfLiteInt8)); +} + +bool IsScalarInputSupported(int32_t builtinCode) +{ + switch (builtinCode) { + case kTfLiteBuiltinAdd: + case kTfLiteBuiltinMul: + case kTfLiteBuiltinSub: + case kTfLiteBuiltinDiv: + case kTfLiteBuiltinEqual: + case kTfLiteBuiltinNotEqual: + case kTfLiteBuiltinGreater: + case kTfLiteBuiltinGreaterEqual: + case kTfLiteBuiltinLess: + case kTfLiteBuiltinLessEqual: + case kTfLiteBuiltinPow: + case kTfLiteBuiltinMaximum: + case kTfLiteBuiltinMinimum: + case kTfLiteBuiltinPrelu: + case kTfLiteBuiltinLeakyRelu: + return true; + default: + return false; + } +} + +bool IsUseTargetDevice(NnrtDelegate::Options delegateOptions, bool excludeNnrtReference) +{ + const std::string& deviceName = delegateOptions.acceleratorName; + bool hasSelectedAccelerator = !deviceName.empty(); + if (!excludeNnrtReference && hasSelectedAccelerator) { + if (!deviceName.compare(NNRT_REFERENCE_DEVICE)) { + hasSelectedAccelerator = false; + } + } + + return hasSelectedAccelerator; +} + +TfLiteStatus GetTargetDevice(TfLiteContext* context, TfLiteDelegate* delegate, 
const NnrtApi* nnrt, size_t& dev) +{ + TF_LITE_ENSURE_EQ(context, nnrt != nullptr, true); + TF_LITE_ENSURE_EQ(context, delegate != nullptr, true); + + NnrtDelegate::Options delegateOptions; + TF_LITE_ENSURE_STATUS(NnrtDelegate::GetOptions(delegate, delegateOptions)); + const std::string& deviceName = delegateOptions.acceleratorName; + + uint32_t numDevices {0}; + const size_t* alldevicesID {nullptr}; + RETURN_TFLITE_ERROR_IF_NN_ERROR(nnrt->OH_NNDevice_GetAllDevicesID(&alldevicesID, &numDevices), + "Get available device number and deviceID."); + if (numDevices == 0) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-UTILS] Have no available device."); + return kTfLiteError; + } + + std::vector deviceTypes; + for (uint32_t i = 0; i < numDevices; ++i) { + OH_NN_DeviceType tempDeviceType {OH_NN_ACCELERATOR}; + RETURN_TFLITE_ERROR_IF_NN_ERROR(nnrt->OH_NNDevice_GetType(alldevicesID[i], &tempDeviceType), + "Get available devicesType."); + deviceTypes.emplace_back(tempDeviceType); + } + + OH_NN_DeviceType deviceType {OH_NN_CPU}; + std::vector::iterator pos = std::find(deviceTypes.begin(), deviceTypes.end(), deviceType); + if (pos != deviceTypes.end()) { + int index = distance(deviceTypes.begin(), pos); + dev = alldevicesID[index]; + } else { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-UTILS] Cannot find the %s device, please choose another process unit.", + deviceName.c_str()); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus TransposeDims(TfLiteContext* context, const int32_t* dims, uint32_t dimCount, + std::vector destAxis, std::vector& weightDims) +{ + TF_LITE_ENSURE_EQ(context, dims != nullptr, true); + + if (dimCount != destAxis.size()) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-UTILS] Invalid dimension count %d.", dimCount); + return kTfLiteError; + } + + for (auto axis : destAxis) { + if (axis < 0) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-UTILS] Invalid axis %d.", axis); + return kTfLiteError; + } + weightDims.emplace_back(*(dims + axis)); + } + + return kTfLiteOk; +} + +TfLiteStatus GetTensorSize(TfLiteContext* context, const int32_t* dims, int32_t dimCount, int64_t& tensorSize) +{ + TF_LITE_ENSURE_EQ(context, dims != nullptr, true); + + if (dimCount != DEPTHWISE_WEIGHT_DIMENSION_COUNT) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[NNRT-UTILS] Dimension count is not equal to destination axis number, should be 4."); + return kTfLiteError; + } + + tensorSize = 1; + for (int32_t i = 0; i < dimCount; ++i) { + if (*(dims + i) <= 0) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "[NNRT-UTILS] Get invalid dimenision."); + return kTfLiteError; + } + tensorSize *= *(dims + i); + } + + return kTfLiteOk; +} + +namespace delegate { +namespace nnrt { +const std::vector ACTIVATE_FUSE_TYPE_LIST = { + OH_NN_FUSED_NONE, + OH_NN_FUSED_RELU, + OH_NN_FUSE_UNSUPPORTED, + OH_NN_FUSED_RELU6, + OH_NN_FUSE_UNSUPPORTED, + OH_NN_FUSE_UNSUPPORTED, + OH_NN_FUSE_UNSUPPORTED +}; + +const unorderedTypeMap TFLITE_TYPE_TO_NNRT_TYPE = { + {kTfLiteBuiltinAdd, OH_NN_OPS_ADD}, + {kTfLiteBuiltinAveragePool2d, OH_NN_OPS_AVG_POOL}, + {kTfLiteBuiltinConcatenation, OH_NN_OPS_CONCAT}, + {kTfLiteBuiltinConv2d, OH_NN_OPS_CONV2D}, + {kTfLiteBuiltinDepthwiseConv2d, OH_NN_OPS_DEPTHWISE_CONV2D_NATIVE}, + {kTfLiteBuiltinDepthToSpace, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinDequantize, OH_NN_OPS_QUANT_DTYPE_CAST}, + {kTfLiteBuiltinEmbeddingLookup, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinFloor, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinFullyConnected, OH_NN_OPS_FULL_CONNECTION}, + {kTfLiteBuiltinHashtableLookup, OH_NN_UNSUPPORT_OPS}, + 
{kTfLiteBuiltinL2Normalization, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinL2Pool2d, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLocalResponseNormalization, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLogistic, OH_NN_OPS_SIGMOID}, + {kTfLiteBuiltinLshProjection, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLstm, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinMaxPool2d, OH_NN_OPS_MAX_POOL}, + {kTfLiteBuiltinMul, OH_NN_OPS_MUL}, + {kTfLiteBuiltinRelu, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReluN1To1, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinRelu6, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReshape, OH_NN_OPS_RESHAPE}, + {kTfLiteBuiltinResizeBilinear, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinRnn, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSoftmax, OH_NN_OPS_SOFTMAX}, + {kTfLiteBuiltinSpaceToDepth, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSvdf, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinTanh, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinConcatEmbeddings, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSkipGram, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinCall, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinCustom, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinEmbeddingLookupSparse, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinPad, OH_NN_OPS_PAD}, + {kTfLiteBuiltinUnidirectionalSequenceRnn, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinGather, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinBatchToSpaceNd, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSpaceToBatchNd, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinTranspose, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinMean, OH_NN_OPS_REDUCE_MEAN}, + {kTfLiteBuiltinSub, OH_NN_OPS_SUB}, + {kTfLiteBuiltinDiv, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSqueeze, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinUnidirectionalSequenceLstm, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinStridedSlice, OH_NN_OPS_STRIDED_SLICE}, + {kTfLiteBuiltinBidirectionalSequenceRnn, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinExp, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinTopkV2, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSplit, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLogSoftmax, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinDelegate, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinBidirectionalSequenceLstm, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinCast, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinPrelu, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinMaximum, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinArgMax, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinMinimum, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLess, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinNeg, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinPadv2, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinGreater, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinGreaterEqual, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLessEqual, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSelect, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSlice, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSin, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinTransposeConv, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSparseToDense, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinTile, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinExpandDims, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinEqual, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinNotEqual, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLog, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSum, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSqrt, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinRsqrt, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinShape, OH_NN_OPS_SHAPE}, + {kTfLiteBuiltinPow, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinArgMin, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinFakeQuant, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReduceProd, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReduceMax, OH_NN_UNSUPPORT_OPS}, + 
{kTfLiteBuiltinPack, OH_NN_OPS_STACK}, + {kTfLiteBuiltinLogicalOr, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinOneHot, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLogicalAnd, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLogicalNot, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinUnpack, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReduceMin, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinFloorDiv, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReduceAny, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSquare, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinZerosLike, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinFill, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinFloorMod, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinRange, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinResizeNearestNeighbor, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinLeakyRelu, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSquaredDifference, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinMirrorPad, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinAbs, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSplitV, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinUnique, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinCeil, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReverseV2, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinAddN, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinGatherNd, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinCos, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinWhere, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinRank, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinElu, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReverseSequence, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinMatrixDiag, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinQuantize, OH_NN_OPS_QUANT_DTYPE_CAST}, + {kTfLiteBuiltinMatrixSetDiag, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinRound, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinHardSwish, OH_NN_OPS_HSWISH}, + {kTfLiteBuiltinIf, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinWhile, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinNonMaxSuppressionV4, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinNonMaxSuppressionV5, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinScatterNd, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSelectV2, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinDensify, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinSegmentSum, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinBatchMatmul, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinPlaceholderForGreaterOpCodes, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinCumsum, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinCallOnce, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinBroadcastTo, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinRfft2d, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinConv3d, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinImag, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReal, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinComplexAbs, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinHashtable, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinHashtableFind, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinHashtableImport, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinHashtableSize, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReduceAll, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinConv3dTranspose, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinVarHandle, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinReadVariable, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinAssignVariable, OH_NN_UNSUPPORT_OPS}, + {kTfLiteBuiltinBroadcastTo, OH_NN_UNSUPPORT_OPS}, +}; +} // nnrt +} // namespace +} // namespace tflite \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_utils.h b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_utils.h new file mode 100644 index 0000000..a4908aa --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/nnrt_utils.h @@ -0,0 +1,153 @@ 
+/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_LITE_DELEGATES_NNRT_UTILS_H +#define TENSORFLOW_LITE_DELEGATES_NNRT_UTILS_H + +#include +#include +#include + +#include "nnrt_delegate.h" + +namespace tflite { +constexpr int32_t DEPTHWISE_WEIGHT_BATCH_DIMENSION = 0; +constexpr int32_t DEPTHWISE_WEIGHT_HEIGHT_DIMENSION = 1; +constexpr int32_t DEPTHWISE_WEIGHT_WIDTH_DIMENSION = 2; +constexpr int32_t DEPTHWISE_WEIGHT_CHANNEL_DIMENSION = 3; +constexpr int32_t DEPTHWISE_WEIGHT_DIMENSION_COUNT = 4; +const std::string NNRT_REFERENCE_DEVICE = "nnrt-reference"; + +// Bit mask for tensor flags. +enum BIT_MASK { + NN_TENSOR_FLAG_SCALAR_AS_TENSOR = 1U << 0, + NN_TENSOR_FLAG_INT8_CONVERSION = 1U << 1, + NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED = 1U << 2, + NN_TENSOR_FLAG_FORCE_PER_CHANNEL = 1U << 3, + NN_TENSOR_FLAG_HALF_TO_FLOAT_CONVERSION = 1U << 4, +}; + +// Returns the enum name corresponding to the given error code if the given +// value corresponds to an of the error codes in the enumeration above or +// an message with the unknown code. +// LINT.IfChange(NnrtErrorDescription) +extern std::string NnrtErrorDescription(int32_t errorCode); + +#define RETURN_TFLITE_ERROR_IF_NN_ERROR(code, callDesc) \ + do { \ + if ((code) != OH_NN_SUCCESS) { \ + const auto errorDesc = NnrtErrorDescription((code)); \ + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "NN API returned error %s at line %d while %s.\n", \ + errorDesc.c_str(), __LINE__, (callDesc)); \ + return kTfLiteError; \ + } \ + } while (0) + +#define RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(code, callDesc, pTensor) \ + do { \ + if ((code) != OH_NN_SUCCESS) { \ + const auto errorDesc = NnrtErrorDescription((code)); \ + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, \ + "NN API returned error %s at line %d while %s for tensor '%s'.\n", errorDesc.c_str(), \ + __LINE__, (callDesc), (pTensor)->name ? (pTensor)->name : "no-name"); \ + return kTfLiteError; \ + } \ + } while (0) + +// Return true if type is kTfLiteFloat32. +extern bool IsFloat(TfLiteType type); + +// Return true if type is kTfLiteUInt8 or kTfLiteInt8. +extern bool IsQuantized(TfLiteType type); + +// Return true if the operator supports scalar data as input. +extern bool IsScalarInputSupported(int32_t builtinCode); + +// Returns true if this delegate is configured to use a specific set of devices. +// If the acceleratorName in the delegate options is equal to "nnrt-reference" +// this method will return true only if the excludeNnrtReference is true. +extern bool IsUseTargetDevice( + NnrtDelegate::Options delegateOptions, bool excludeNnrtReference = false); + +// Fills the given result vector with the list of devices the given delegate +// is referring to. +// There are three possible results, +// - An empty array (not the full list of available accelerators, +// for efficiency reasons) if no accelerator is chosen and the +// disallowNnrtCpu delegate option is false. 
+// - A single element array with the target processor, if an accelerator name +// is specified in the delegate options. +// - The target available device on device. +extern TfLiteStatus GetTargetDevice(TfLiteContext* context, TfLiteDelegate* delegate, + const NnrtApi* nnrt, size_t& dev); + +// Transpose demension following fixed axis. +// If exist -1 in destAxis, return kTfLiteError. +extern TfLiteStatus TransposeDims(TfLiteContext* context, const int32_t* dims, uint32_t dimCount, + std::vector destAxis, std::vector& weightDims); + +// Get Tensor size by byte. +// Calculate Tesnorsize by mul all dimension in dims. +// Return kTfLiteError if element dimension is less 0. +extern TfLiteStatus GetTensorSize(TfLiteContext* context, const int32_t* dims, int32_t dimCount, int64_t& tensorSize); + +// Transpose dimension for Tensor. +// Only change NHWC format tensor to CHWN format tensor, and +// the capacity of result vec must equal to input tensor size. +template +TfLiteStatus TransposeTensor(TfLiteContext* context, int32_t tensorIndex, const int32_t* dims, + T* transposeTensor) +{ + TF_LITE_ENSURE_EQ(context, dims != nullptr, true); + + // NHWC -> CHWN + TfLiteTensor* tensor = &(context->tensors[tensorIndex]); + const T* tensorData = reinterpret_cast(tensor->data.data); + const int32_t batch = dims[DEPTHWISE_WEIGHT_BATCH_DIMENSION]; + const int32_t height = dims[DEPTHWISE_WEIGHT_HEIGHT_DIMENSION]; + const int32_t width = dims[DEPTHWISE_WEIGHT_WIDTH_DIMENSION]; + const int32_t channel = dims[DEPTHWISE_WEIGHT_CHANNEL_DIMENSION]; + + for (int32_t c = 0; c < channel; ++c) { + for (int32_t j = 0; j < height * width; ++j) { + for (int32_t n = 0; n < batch; ++n) { + int32_t newPos = c * (height * width) * batch + j * batch + n; + int32_t orgPos = n * (height * width) * channel + j * channel + c; + *(transposeTensor + newPos) = *(tensorData + orgPos); + } + } + } + + return kTfLiteOk; +}; + +namespace delegate { +namespace nnrt { +using unorderedTypeMap = std::unordered_map; + +extern const std::vector ACTIVATE_FUSE_TYPE_LIST; + +extern const unorderedTypeMap TFLITE_TYPE_TO_NNRT_TYPE; + +const int32_t INVALID_INDEX = -1; + +const int32_t OH_NN_UNSUPPORT_OPS = -1; + +const int32_t OH_NN_FUSE_UNSUPPORTED = -1; +} // namespace nnrt +} // namespace delegate +} // namespace tflite + +#endif // TENSORFLOW_LITE_DELEGATES_NNRT_UTILS_H diff --git a/example/deep_learning_framework/tflite/delegates/nnrt_delegate/tensor_mapping.h b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/tensor_mapping.h new file mode 100644 index 0000000..0f3d139 --- /dev/null +++ b/example/deep_learning_framework/tflite/delegates/nnrt_delegate/tensor_mapping.h @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TENSORFLOW_LITE_DELEGATES_NNRT_TENSOR_MAPPING_H +#define TENSORFLOW_LITE_DELEGATES_NNRT_TENSOR_MAPPING_H + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/minimal_logging.h" + +#include "nnrt_utils.h" + +namespace tflite { +namespace delegate { +namespace nnrt { +constexpr uint32_t QUANT_NUMBITS = 8; + +class TensorMapping { +public: + // Given a TFLite index return the NN index. If it doesn't exist + // return -1. + int32_t LiteIndexToNn(int32_t index) const + { + const int64_t maxSize = m_liteTensorToNnTensor.size(); + if (index >= 0 && index < maxSize) { + return m_liteTensorToNnTensor[index]; + } else { + return INVALID_INDEX; + } + } + + // NN API uses non tensor tensors instead of structs. This creates one + // and returns the index. It uses a std::vector and resizes it as needed + // keeping -1 to unmapped values. Intermediate tensors likely will not + // be mapped. + const int32_t AddNewNonTensorTensor() + { + return m_nextNnTensorIndex++; + } + + // Add a new mapping from `tfliteIndex` and return the NN API tensor index. + int32_t AddNewNnTensorIndex(int32_t tfliteIndex) + { + const int64_t currentSize = m_liteTensorToNnTensor.size(); + if (tfliteIndex >= currentSize) { + m_liteTensorToNnTensor.resize(tfliteIndex + 1, INVALID_INDEX); + } + const int32_t newTensorIndex = m_nextNnTensorIndex++; + m_liteTensorToNnTensor[tfliteIndex] = newTensorIndex; + return newTensorIndex; + } + + // Get nn tensor tensor tensor num. + int32_t GetTensorTensorNum() const + { + return m_nextNnTensorIndex; + } + + // Given a TFLite index returns a TFLite type to which a tensor must be + // converted during copying the data to the memory allocated for NN API. + // kTfLiteNoType means no conversion is needed. + TfLiteType GetEqualLiteTypeFromLiteIndex(int32_t index) const + { + const int64_t maxSize = m_indexToTypeConversion.size(); + if (index >= 0 && index < maxSize) + return m_indexToTypeConversion[index]; + else + return kTfLiteNoType; + } + + // Add a new mapping from TFLite index to a type conversion. 
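+    // For example, a kTfLiteFloat16 scalar that ConvertType() below promotes to OH_NN_FLOAT32 is
+    // recorded here as kTfLiteFloat32, so the later data copy knows to widen the values first.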
+ void AddTypeConversion(int32_t tfliteIndex, TfLiteType tfliteType) + { + const int64_t currentSize = m_indexToTypeConversion.size(); + if (tfliteIndex >= currentSize) { + m_indexToTypeConversion.resize(tfliteIndex + 1, kTfLiteNoType); + } + m_indexToTypeConversion[tfliteIndex] = tfliteType; + } + + // Convert TFLite tensor quant params to NNRT tensor quant params + TfLiteStatus ConvertQuantParams(TfLiteContext* context, int32_t tensorIndex, OH_NN_QuantParam& quantParam) + { + TfLiteTensor* tensor = &(context->tensors[tensorIndex]); + TfLiteType tfType = tensor->type; + if ((tfType != kTfLiteFloat32) && (tfType != kTfLiteFloat16) && (tfType != kTfLiteBool) && + (tfType != kTfLiteInt32) && (tfType != kTfLiteUInt8) && (tfType != kTfLiteInt8)) { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[TENSOR_MAPPING] type %s is not supported.", TfLiteTypeGetName(tensor->type)); + return kTfLiteError; + } + + if (tensor->quantization.type) { + TfLiteAffineQuantization* params = reinterpret_cast(tensor->quantization.params); + int number = params->scale->size; + std::vector scale; + for (int i = 0; i < number; ++i) { + scale.emplace_back(static_cast(params->scale->data[i])); + } + m_scale.emplace_back(scale); + quantParam.scale = m_scale.back().data(); + quantParam.zeroPoint = params->zero_point->data; + quantParam.quantCount = number; + m_numBits.emplace_back(number, QUANT_NUMBITS); + quantParam.numBits = m_numBits.back().data(); + } else { + quantParam.quantCount = 0; + } + + return kTfLiteOk; + } + + // Convert TFLite tensor type to NNRT tensor type + TfLiteStatus ConvertType(TfLiteContext* context, int32_t tensorIndex, int32_t tensorFlags, OH_NN_DataType& nnType) + { + const bool scalarAsTensor = tensorFlags & NN_TENSOR_FLAG_SCALAR_AS_TENSOR; + TfLiteTensor* tensor = &(context->tensors[tensorIndex]); + TfLiteType nnTypeEquivalent = GetEqualLiteTypeFromLiteIndex(tensorIndex); + if (tensor->type == kTfLiteFloat32) { + nnType = OH_NN_FLOAT32; + } else if (tensor->type == kTfLiteFloat16) { + nnType = OH_NN_FLOAT16; + if (scalarAsTensor) { + nnType = OH_NN_FLOAT32; + AddTypeConversion(tensorIndex, kTfLiteFloat32); + } + } else if (tensor->type == kTfLiteInt32) { + nnType = OH_NN_INT32; + } else if (tensor->type == kTfLiteBool) { + nnType = OH_NN_INT8; + } else if (tensor->type == kTfLiteUInt8) { + nnType = (nnTypeEquivalent == kTfLiteInt32) ? OH_NN_INT32 : OH_NN_INT8; + } else if (tensor->type == kTfLiteInt8) { + nnType = (nnTypeEquivalent == kTfLiteInt32) ? OH_NN_INT32 : OH_NN_UINT8; + } else { + TFLITE_LOG_PROD(TFLITE_LOG_ERROR, + "[TENSOR_MAPPING] type %s is not supported.", TfLiteTypeGetName(tensor->type)); + return kTfLiteError; + } + + return kTfLiteOk; + } + +private: + // Next index of nnrt tensor + int32_t m_nextNnTensorIndex = 0; + + // Mapping from lite index. Use a std::vector for speed and code size + // rather than a map. + std::vector m_liteTensorToNnTensor; + + // Mapping from lite index to a type which tensor must be converted to during + // the copying of the data to the memory allocated for NN API. kTfLiteNoType + // means no conversion is needed. Use an std::vector for speed and code size + // rather than a map. 
+ std::vector m_indexToTypeConversion; + + std::vector> m_numBits; + + std::vector> m_scale; +}; +} // namespace nnrt +} // namespace delegate +} // namespace tflite + +#endif // TENSORFLOW_LITE_DELEGATES_NNRT_TENSOR_MAPPING_H \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/label_classify/CMakeLists.txt b/example/deep_learning_framework/tflite/label_classify/CMakeLists.txt new file mode 100644 index 0000000..a409748 --- /dev/null +++ b/example/deep_learning_framework/tflite/label_classify/CMakeLists.txt @@ -0,0 +1,39 @@ +# +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Header path +set(OHOS_INC ${LOCAL_DIRECTORY_PATH}/../../interfaces/kits/c) +set(TOOLS_INC ${LOCAL_DIRECTORY_PATH}/tflite/tools) +set(TFLITE_INC ${LOCAL_DIRECTORY_PATH}/lib_3rd_nnrt_tflite/include) +set(TFLITE_FLATBUFFER_INC ${LOCAL_DIRECTORY_PATH}/lib_3rd_nnrt_tflite/include/tensorflow/lite) +include_directories(${NNRT_DEMO_HOME} ${TFLITE_INC} ${OHOS_INC} ${TOOLS_INC} ${TFLITE_FLATBUFFER_INC} ${LOCAL_DIRECTORY_PATH}) + +# Scr path +aux_source_directory(${NNRT_DEMO_HOME} NNRT_DEMO_SRCS) +file(GLOB TOOLS_SRCS "${TOOLS_INC}/*.cpp") + +LINK_DIRECTORIES(${TFLITE_LIB_PATH}/com/arm64-v8a/lib/) +add_executable(label_classify ${NNRT_DEMO_SRCS} ${TOOLS_SRCS}) + +set(CMAKE_SHARED_LINKER_FLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") +target_link_libraries(label_classify ${LOCAL_DIRECTORY_PATH}/lib/libnnrt_implementation.so) +target_link_libraries(label_classify ${LOCAL_DIRECTORY_PATH}/lib/libnnrt_delegate.so) +target_link_libraries(label_classify -ltensorflow-lite) + +set (EXECUTABLE_OUTPUT_PATH ${LOCAL_DIRECTORY_PATH}/output) + + + + diff --git a/example/deep_learning_framework/tflite/label_classify/label_classify.cpp b/example/deep_learning_framework/tflite/label_classify/label_classify.cpp new file mode 100644 index 0000000..18b8506 --- /dev/null +++ b/example/deep_learning_framework/tflite/label_classify/label_classify.cpp @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "label_classify.h" + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tensorflow/lite/kernels/register.h" +#include "tensorflow/lite/optional_debug_tools.h" +#include "tensorflow/lite/string_util.h" +#include "tensorflow/lite/tools/command_line_flags.h" +#include "tensorflow/lite/tools/delegates/delegate_provider.h" + +#include "log.h" +#include "utils.h" + +namespace tflite { +namespace label_classify { +using TfLiteDelegatePtr = tflite::Interpreter::TfLiteDelegatePtr; +using ProvidedDelegateList = tflite::tools::ProvidedDelegateList; +constexpr int BASE_NUMBER = 10; +constexpr int CONVERSION_RATE = 1000; +static struct option LONG_OPTIONS[] = { + {"help", no_argument, nullptr, 'h'}, + {"use_nnrt", required_argument, nullptr, 'a'}, + {"count", required_argument, nullptr, 'c'}, + {"image", required_argument, nullptr, 'i'}, + {"labels", required_argument, nullptr, 'l'}, + {"tflite_model", required_argument, nullptr, 'm'}, + {"num_results", required_argument, nullptr, 'n'}, + {"input_mean", required_argument, nullptr, 'b'}, + {"input_std", required_argument, nullptr, 's'}, + {"verbose", required_argument, nullptr, 'v'}, + {"warmup_nums", required_argument, nullptr, 'w'}, + {"print_result", required_argument, nullptr, 'z'}, + {"input_shape", required_argument, nullptr, 'p'}, + {nullptr, 0, nullptr, 0}, +}; + +class DelegateProviders { +public: + DelegateProviders() : m_delegateListUtil(¶ms) + { + m_delegateListUtil.AddAllDelegateParams(); // Add all registered delegate params to the contained 'params_'. + } + + ~DelegateProviders() {} + + bool InitFromCmdlineArgs(int32_t* argc, const char** argv) + { + std::vector flags; + m_delegateListUtil.AppendCmdlineFlags(&flags); + + const bool parseResult = Flags::Parse(argc, argv, flags); + if (!parseResult) { + std::string usage = Flags::Usage(argv[0], flags); + LOG(ERROR) << usage; + } + return parseResult; + } + + void MergeSettingsIntoParams(const Settings& settings) + { + if (settings.accel) { + if (!params.HasParam("use_nnrt")) { + LOG(WARN) << "NNRT deleate execution provider isn't linked or NNRT " + << "delegate isn't supported on the platform!"; + } else { + params.Set("use_nnrt", true); + } + } + } + + std::vector CreateAllDelegates() const + { + return m_delegateListUtil.CreateAllRankedDelegates(); + } + +private: + // Contain delegate-related parameters that are initialized from command-line flags. + tflite::tools::ToolParams params; + + // A helper to create TfLite delegates. 
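+    // (Wraps the registered delegate providers; CreateAllDelegates() above asks each enabled
+    // provider for its delegate instance.)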
+ ProvidedDelegateList m_delegateListUtil; +}; + +void PrepareModel(Settings& settings, std::unique_ptr& interpreter, + DelegateProviders& delegateProviders) +{ + const std::vector inputs = interpreter->inputs(); + const std::vector outputs = interpreter->outputs(); + + if (settings.verbose) { + LOG(INFO) << "number of inputs: " << inputs.size(); + LOG(INFO) << "number of outputs: " << outputs.size(); + } + + std::map> neededInputShapes; + if (settings.inputShape != "") { + if (FilterDynamicInputs(settings, interpreter, neededInputShapes) != kTfLiteOk) { + return; + } + } + + delegateProviders.MergeSettingsIntoParams(settings); + auto delegates = delegateProviders.CreateAllDelegates(); + + for (auto& delegate : delegates) { + const auto delegateName = delegate.provider->GetName(); + if (interpreter->ModifyGraphWithDelegate(std::move(delegate.delegate)) != kTfLiteOk) { + LOG(ERROR) << "Failed to apply " << delegateName << " delegate."; + return; + } else { + LOG(INFO) << "Applied " << delegateName << " delegate."; + } + } + + if (settings.inputShape != "") { + for (const auto& inputShape : neededInputShapes) { + if (IsEqualShape(inputShape.first, inputShape.second, interpreter)) { + LOG(WARNING) << "The input shape is same as the model shape, not resize."; + continue; + } + if (interpreter->ResizeInputTensor(inputShape.first, inputShape.second) != kTfLiteOk) { + LOG(ERROR) << "Fail to resize index " << inputShape.first << "."; + return; + } else { + LOG(INFO) << "Susccess to resize index " << inputShape.first << "."; + } + } + } + + if (interpreter->AllocateTensors() != kTfLiteOk) { + LOG(ERROR) << "Failed to allocate tensors!"; + return; + } + + if (settings.verbose) { + PrintInterpreterState(interpreter.get()); + } +} + +void LogInterpreterParams(Settings& settings, std::unique_ptr& interpreter) +{ + if (!interpreter) { + LOG(ERROR) << "Failed to construct interpreter"; + return; + } + + if (settings.verbose) { + LOG(INFO) << "tensors size: " << interpreter->tensors_size(); + LOG(INFO) << "nodes size: " << interpreter->nodes_size(); + LOG(INFO) << "inputs: " << interpreter->inputs().size(); + LOG(INFO) << "input(0) name: " << interpreter->GetInputName(0); + + size_t tSize = interpreter->tensors_size(); + for (size_t i = 0; i < tSize; ++i) { + if (interpreter->tensor(i)->name) { + LOG(INFO) << i << ": " << interpreter->tensor(i)->name << ", " << interpreter->tensor(i)->bytes << + ", " << interpreter->tensor(i)->type << ", " << interpreter->tensor(i)->params.scale << ", " << + interpreter->tensor(i)->params.zero_point; + } + } + } +} + +void InferenceModel(Settings& settings, DelegateProviders& delegateProviders) +{ + if (!settings.modelName.c_str()) { + LOG(ERROR) << "no model file name"; + return; + } + std::unique_ptr model; + std::unique_ptr interpreter; + model = tflite::FlatBufferModel::BuildFromFile(settings.modelName.c_str()); + if (!model) { + LOG(ERROR) << "Failed to mmap model " << settings.modelName; + return; + } + + settings.model = model.get(); + model->error_reporter(); + tflite::ops::builtin::BuiltinOpResolver resolver; + tflite::InterpreterBuilder(*model, resolver)(&interpreter); + if (!interpreter) { + LOG(ERROR) << "Failed to construct interpreter, please check the model."; + return; + } + + LogInterpreterParams(settings, interpreter); + + // set settings input type + PrepareModel(settings, interpreter, delegateProviders); + std::vector imageSize { 224, 224, 3}; + ImportData(settings, imageSize, interpreter); + + if (settings.loopCount > 0 && settings.numberOfWarmupRuns 
> 0) { + LOG(INFO) << "Warm-up for " << settings.numberOfWarmupRuns << " times"; + for (int32_t i = 0; i < settings.numberOfWarmupRuns; ++i) { + if (interpreter->Invoke() != kTfLiteOk) { + LOG(ERROR) << "Failed to invoke tflite!"; + return; + } + } + } + + struct timeval startTime, stopTime; + LOG(INFO) << "Invoke for " << settings.loopCount << " times"; + gettimeofday(&startTime, nullptr); + for (int32_t i = 0; i < settings.loopCount; ++i) { + if (interpreter->Invoke() != kTfLiteOk) { + LOG(ERROR) << "Failed to invoke tflite!"; + return; + } + } + + gettimeofday(&stopTime, nullptr); + LOG(INFO) << "invoked, average time: " << + (GetUs(stopTime) - GetUs(startTime)) / (settings.loopCount * CONVERSION_RATE) << " ms"; + AnalysisResults(settings, interpreter); +} + +void DisplayUsage() +{ + LOG(INFO) << "label_classify\n" + << "\t--help, -h: show the usage of the demo\n" + << "\t--use_nnrt, -a: [0|1], use NNRT or not\n" + << "\t--input_mean, -b: input mean\n" + << "\t--count, -c: loop interpreter->Invoke() for certain times\n" + << "\t--image, -i: image_name.bmp\n" + << "\t--labels, -l: labels for the model\n" + << "\t--tflite_model, -m: modelName.tflite\n" + << "\t--num_results, -n: number of results to show\n" + << "\t--input_std, -s: input standard deviation\n" + << "\t--verbose, -v: [0|1] print more information\n" + << "\t--warmup_nums, -w: number of warmup runs\n" + << "\t--print_result, -z: flag to print results\n" + << "\t--input_shape, -p: Indicates the specified dynamic input node and the corresponding shape.\n"; +} + +void InitSettings(int32_t argc, char** argv, Settings& settings) +{ + // getopt_long stores the option index here. + int32_t optionIndex = 0; + while ((optionIndex = getopt_long(argc, argv, "a:b:c:h:i:l:m:n:p:s:v:w:z:", LONG_OPTIONS, nullptr)) != -1) { + switch (optionIndex) { + case 'a': + settings.accel = strtol(optarg, nullptr, BASE_NUMBER); + break; + case 'b': + settings.inputMean = strtod(optarg, nullptr); + break; + case 'c': + settings.loopCount = strtol(optarg, nullptr, BASE_NUMBER); + break; + case 'i': + settings.inputBmpName = optarg; + break; + case 'l': + settings.labelsFileName = optarg; + break; + case 'm': + settings.modelName = optarg; + break; + case 'n': + settings.numberOfResults = strtol(optarg, nullptr, BASE_NUMBER); + break; + case 'p': + settings.inputShape = optarg; + break; + case 's': + settings.inputStd = strtod(optarg, nullptr); + break; + case 'v': + settings.verbose = strtol(optarg, nullptr, BASE_NUMBER); + break; + case 'w': + settings.numberOfWarmupRuns = strtol(optarg, nullptr, BASE_NUMBER); + break; + case 'z': + settings.printResult = strtol(optarg, nullptr, BASE_NUMBER); + break; + case 'h': + case '?': + // getopt_long already printed an error message. 
+ DisplayUsage(); + return; + default: + return; + } + } +} + +int32_t Main(int32_t argc, char** argv) +{ + DelegateProviders delegateProviders; + bool parseResult = delegateProviders.InitFromCmdlineArgs(&argc, const_cast(argv)); + if (!parseResult) { + return EXIT_FAILURE; + } + + Settings settings; + InitSettings(argc, argv, settings); + InferenceModel(settings, delegateProviders); + return 0; +} +} // namespace label_classify +} // namespace tflite + +int32_t main(int32_t argc, char** argv) +{ + return tflite::label_classify::Main(argc, argv); +} diff --git a/example/deep_learning_framework/tflite/label_classify/label_classify.h b/example/deep_learning_framework/tflite/label_classify/label_classify.h new file mode 100644 index 0000000..e8c1c4a --- /dev/null +++ b/example/deep_learning_framework/tflite/label_classify/label_classify.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_H +#define TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_H + +#include + +#include "tensorflow/lite/model_builder.h" +#include "tensorflow/lite/string_type.h" +#include "tensorflow/lite/c/c_api_types.h" + +namespace tflite { +namespace label_classify { +struct Settings { + tflite::FlatBufferModel* model; + bool verbose = false; + bool accel = false; + bool printResult = false; + TfLiteType inputType = kTfLiteFloat32; + int32_t loopCount = 1; + float inputMean = 127.5f; + float inputStd = 127.5f; + string modelName = "./mbv2.tflite"; + string inputBmpName = "./grace_hopper.bmp"; + string labelsFileName = "./labels.txt"; + string inputShape = ""; + int32_t numberOfThreads = 1; + int32_t numberOfResults = 5; + int32_t numberOfWarmupRuns = 0; +}; +} // namespace label_classify +} // namespace tflite + +#endif // TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_H diff --git a/example/deep_learning_framework/tflite/nnrt/CMakeLists.txt b/example/deep_learning_framework/tflite/nnrt/CMakeLists.txt new file mode 100644 index 0000000..ba7fa6d --- /dev/null +++ b/example/deep_learning_framework/tflite/nnrt/CMakeLists.txt @@ -0,0 +1,26 @@ +# +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# Header path +set(OHOS_INC ${LOCAL_DIRECTORY_PATH}/../../interfaces/kits/c) +include_directories(${NNRT_INTERFACE_HOME} ${OHOS_INC}) + +# Scr path +file(GLOB NNRT_SRCS "${NNRT_INTERFACE_HOME}/*.cpp") + +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${LOCAL_DIRECTORY_PATH}/lib) +add_library(nnrt_implementation SHARED ${NNRT_SRCS}) + + diff --git a/example/deep_learning_framework/tflite/nnrt/nnrt_implementation.cpp b/example/deep_learning_framework/tflite/nnrt/nnrt_implementation.cpp new file mode 100644 index 0000000..136f9f1 --- /dev/null +++ b/example/deep_learning_framework/tflite/nnrt/nnrt_implementation.cpp @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnrt_implementation.h" + +#include +#include +#include +#include + +#include +#include + +namespace tflite { +// These function parameters are guaranteed to be nullptr by the caller +template +void LoadFunction(void* handle, const char* name, T* nnrtFunction) +{ + if (name == nullptr) { + NNRT_LOG("nnrt error: the function %s does not exist.", name); + return; + } + + void* fn = dlsym(handle, name); + if (fn == nullptr) { + NNRT_LOG("nnrt error: unable to open function %s", name); + return; + } + + *nnrtFunction = reinterpret_cast(fn); + return; +} + +const NnrtApi LoadNnrt() +{ + NnrtApi nnrt; + nnrt.nnrtExists = false; + void* libNeuralNetworks = nullptr; + + // Assumes there can be multiple instances of NN API + static std::string nnrtLibraryName = "libneural_network_runtime.z.so"; + libNeuralNetworks = dlopen(nnrtLibraryName.c_str(), RTLD_LAZY | RTLD_NODELETE); + if (libNeuralNetworks == nullptr) { + NNRT_LOG("nnrt error: unable to open library %s", nnrtLibraryName.c_str()); + return nnrt; + } else { + nnrt.nnrtExists = true; + } + + // NNModel + LoadFunction(libNeuralNetworks, "OH_NNModel_Construct", &nnrt.OH_NNModel_Construct); + LoadFunction(libNeuralNetworks, "OH_NNModel_AddTensor", &nnrt.OH_NNModel_AddTensor); + LoadFunction(libNeuralNetworks, "OH_NNModel_SetTensorData", &nnrt.OH_NNModel_SetTensorData); + LoadFunction(libNeuralNetworks, "OH_NNModel_AddOperation", &nnrt.OH_NNModel_AddOperation); + LoadFunction(libNeuralNetworks, "OH_NNModel_SpecifyInputsAndOutputs", &nnrt.OH_NNModel_SpecifyInputsAndOutputs); + LoadFunction(libNeuralNetworks, "OH_NNModel_Finish", &nnrt.OH_NNModel_Finish); + LoadFunction(libNeuralNetworks, "OH_NNModel_Destroy", &nnrt.OH_NNModel_Destroy); + LoadFunction(libNeuralNetworks, "OH_NNModel_GetAvailableOperations", &nnrt.OH_NNModel_GetAvailableOperations); + + // NNCompilation + LoadFunction(libNeuralNetworks, "OH_NNCompilation_Construct", &nnrt.OH_NNCompilation_Construct); + LoadFunction(libNeuralNetworks, "OH_NNCompilation_SetDevice", &nnrt.OH_NNCompilation_SetDevice); + LoadFunction(libNeuralNetworks, "OH_NNCompilation_SetCache", &nnrt.OH_NNCompilation_SetCache); + LoadFunction(libNeuralNetworks, "OH_NNCompilation_SetPerformanceMode", &nnrt.OH_NNCompilation_SetPerformanceMode); + LoadFunction(libNeuralNetworks, 
"OH_NNCompilation_SetPriority", &nnrt.OH_NNCompilation_SetPriority); + LoadFunction(libNeuralNetworks, "OH_NNCompilation_EnableFloat16", &nnrt.OH_NNCompilation_EnableFloat16); + LoadFunction(libNeuralNetworks, "OH_NNCompilation_Build", &nnrt.OH_NNCompilation_Build); + LoadFunction(libNeuralNetworks, "OH_NNCompilation_Destroy", &nnrt.OH_NNCompilation_Destroy); + + // NNExecutor + LoadFunction(libNeuralNetworks, "OH_NNExecutor_Construct", &nnrt.OH_NNExecutor_Construct); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_SetInput", &nnrt.OH_NNExecutor_SetInput); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_SetOutput", &nnrt.OH_NNExecutor_SetOutput); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_GetOutputShape", &nnrt.OH_NNExecutor_GetOutputShape); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_Run", &nnrt.OH_NNExecutor_Run); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_AllocateInputMemory", &nnrt.OH_NNExecutor_AllocateInputMemory); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_AllocateOutputMemory", &nnrt.OH_NNExecutor_AllocateOutputMemory); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_DestroyInputMemory", &nnrt.OH_NNExecutor_DestroyInputMemory); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_DestroyOutputMemory", &nnrt.OH_NNExecutor_DestroyOutputMemory); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_SetInputWithMemory", &nnrt.OH_NNExecutor_SetInputWithMemory); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_SetOutputWithMemory", &nnrt.OH_NNExecutor_SetOutputWithMemory); + LoadFunction(libNeuralNetworks, "OH_NNExecutor_Destroy", &nnrt.OH_NNExecutor_Destroy); + + // NNDevice + LoadFunction(libNeuralNetworks, "OH_NNDevice_GetAllDevicesID", &nnrt.OH_NNDevice_GetAllDevicesID); + LoadFunction(libNeuralNetworks, "OH_NNDevice_GetName", &nnrt.OH_NNDevice_GetName); + LoadFunction(libNeuralNetworks, "OH_NNDevice_GetType", &nnrt.OH_NNDevice_GetType); + + return nnrt; +} + +const NnrtApi* NnrtImplementation() +{ + static const NnrtApi nnrt = LoadNnrt(); + if (!nnrt.nnrtExists) { + return nullptr; + } + return &nnrt; +} + +} // namespace tflite diff --git a/example/deep_learning_framework/tflite/nnrt/nnrt_implementation.h b/example/deep_learning_framework/tflite/nnrt/nnrt_implementation.h new file mode 100644 index 0000000..5a342cb --- /dev/null +++ b/example/deep_learning_framework/tflite/nnrt/nnrt_implementation.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_LITE_NNRT_IMPLEMENTATION_H +#define TENSORFLOW_LITE_NNRT_IMPLEMENTATION_H + +#include +#include +#include +#include +#include +#include + +#include "neural_network_runtime_type.h" + +namespace tflite { +#define NNRT_LOG(format, ...) fprintf(stderr, format "\n", __VA_ARGS__) + +struct NnrtApi { + // This indicates the availability of nnrt library. If it is false, it means that loading + // the nnrt library failed and tflite will not use nnrt to run the model, vice versa. 
+ bool nnrtExists; + + // Create model interface + OH_NNModel* (*OH_NNModel_Construct)(void); + OH_NN_ReturnCode (*OH_NNModel_AddTensor)(OH_NNModel* model, const OH_NN_Tensor* nnTensor); + OH_NN_ReturnCode (*OH_NNModel_SetTensorData)(OH_NNModel* model, uint32_t index, const void* buffer, + size_t length); + OH_NN_ReturnCode (*OH_NNModel_AddOperation)(OH_NNModel* model, OH_NN_OperationType op, + const OH_NN_UInt32Array* paramIndices, const OH_NN_UInt32Array* inputIndices, + const OH_NN_UInt32Array* outputIndices); + OH_NN_ReturnCode (*OH_NNModel_SpecifyInputsAndOutputs)(OH_NNModel* model, const OH_NN_UInt32Array* inputIndices, + const OH_NN_UInt32Array* outputIndices); + OH_NN_ReturnCode (*OH_NNModel_Finish)(OH_NNModel* model); + void (*OH_NNModel_Destroy)(OH_NNModel** model); + OH_NN_ReturnCode (*OH_NNModel_GetAvailableOperations)(OH_NNModel* model, size_t deviceID, const bool** isSupported, + uint32_t* opCount); + // Compilation interface + OH_NNCompilation* (*OH_NNCompilation_Construct)(const OH_NNModel* model); + OH_NN_ReturnCode (*OH_NNCompilation_SetCache)(OH_NNCompilation* compilation, const char* cacheDir, + uint32_t version); + OH_NN_ReturnCode (*OH_NNCompilation_SetPerformanceMode)(OH_NNCompilation* compilation, + OH_NN_PerformanceMode performanceMode); + OH_NN_ReturnCode (*OH_NNCompilation_SetPriority)(OH_NNCompilation* compilation, OH_NN_Priority priority); + OH_NN_ReturnCode (*OH_NNCompilation_EnableFloat16)(OH_NNCompilation* compilation, bool enablefloat16); + OH_NN_ReturnCode (*OH_NNCompilation_SetDevice)(OH_NNCompilation* compilation, size_t deviceID); + OH_NN_ReturnCode (*OH_NNCompilation_Build)(OH_NNCompilation* compilation); + void (*OH_NNCompilation_Destroy)(OH_NNCompilation** compilation); + // Executor interface + OH_NNExecutor* (*OH_NNExecutor_Construct)(OH_NNCompilation* compilation); + OH_NN_ReturnCode (*OH_NNExecutor_SetInput)(OH_NNExecutor* executor, uint32_t inputIndex, + const OH_NN_Tensor* nnTensor, const void* buffer, size_t length); + OH_NN_ReturnCode (*OH_NNExecutor_SetOutput)(const OH_NNExecutor* executor, uint32_t outputIndex, void* buffer, + size_t length); + OH_NN_ReturnCode (*OH_NNExecutor_GetOutputShape)(const OH_NNExecutor* executor, uint32_t outputIndex, + const uint32_t** dimensions, uint32_t* dimensionCount); + OH_NN_ReturnCode (*OH_NNExecutor_Run)(OH_NNExecutor* executor); + OH_NN_Memory* (*OH_NNExecutor_AllocateInputMemory)(OH_NNExecutor* executor, uint32_t inputIndex, size_t length); + OH_NN_Memory* (*OH_NNExecutor_AllocateOutputMemory)(OH_NNExecutor* executor, uint32_t outputIndex, size_t length); + void (*OH_NNExecutor_DestroyOutputMemory)(OH_NNExecutor* executor, uint32_t outputIndex, OH_NN_Memory** memory); + void (*OH_NNExecutor_DestroyInputMemory)(OH_NNExecutor* executor, uint32_t inputIndex, OH_NN_Memory** memory); + OH_NN_ReturnCode (*OH_NNExecutor_SetInputWithMemory)(OH_NNExecutor* executor, uint32_t inputIndex, + const OH_NN_Tensor* nnTensor, const OH_NN_Memory* memory); + OH_NN_ReturnCode (*OH_NNExecutor_SetOutputWithMemory)(OH_NNExecutor* executor, uint32_t outputIndex, + const OH_NN_Memory* memory); + void (*OH_NNExecutor_Destroy)(OH_NNExecutor** executor); + // Device interface + OH_NN_ReturnCode (*OH_NNDevice_GetAllDevicesID)(const size_t** allDevicesID, uint32_t* deviceCount); + OH_NN_ReturnCode (*OH_NNDevice_GetName)(size_t deviceID, const char** name); + OH_NN_ReturnCode (*OH_NNDevice_GetType)(size_t deviceID, OH_NN_DeviceType* deviceType); +}; + +const NnrtApi* NnrtImplementation(); +} // namespace tflite + +#endif // 
TENSORFLOW_LITE_NNRT_IMPLEMENTATION_H \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/tools/bitmap_helpers.cpp b/example/deep_learning_framework/tflite/tools/bitmap_helpers.cpp new file mode 100644 index 0000000..379ca94 --- /dev/null +++ b/example/deep_learning_framework/tflite/tools/bitmap_helpers.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "tflite/tools/bitmap_helpers.h" + +#include +#include + +#include "tflite/tools/log.h" + +namespace tflite { +namespace label_classify { +void DecodeBmp(const uint8_t* input, int32_t rowSize, ImageInfo imageInfo, bool topDown, std::vector& output) +{ + ColorChannelOffset colorChannelOffset = { BLUE_OFFSET, GREEN_OFFSET, ALPHA_OFFSET }; + for (int32_t i = 0; i < imageInfo.height; ++i) { + int32_t srcPos; + int32_t dstPos; + + for (int32_t j = 0; j < imageInfo.width; j++) { + if (!topDown) { + srcPos = ((imageInfo.height - 1 - i) * rowSize) + j * imageInfo.channels; + } else { + srcPos = i * rowSize + j * imageInfo.channels; + } + + dstPos = (i * imageInfo.width + j) * imageInfo.channels; + + switch (imageInfo.channels) { + case GRAYSCALE_DIM: + output[dstPos] = input[srcPos]; + break; + case BGR_DIM: + // BGR -> RGB + output[dstPos] = input[srcPos + colorChannelOffset.blueOffset]; + output[dstPos + colorChannelOffset.greenOffset] = input[srcPos + colorChannelOffset.greenOffset]; + output[dstPos + colorChannelOffset.blueOffset] = input[srcPos]; + break; + case BGRA_DIM: + // BGRA -> RGBA + output[dstPos] = input[srcPos + colorChannelOffset.blueOffset]; + output[dstPos + colorChannelOffset.greenOffset] = input[srcPos + colorChannelOffset.greenOffset]; + output[dstPos + colorChannelOffset.blueOffset] = input[srcPos]; + output[dstPos + colorChannelOffset.alphaOffset] = input[srcPos + colorChannelOffset.alphaOffset]; + break; + default: + LOG(FATAL) << "Unexpected number of channels: " << imageInfo.channels; + break; + } + } + } + return; +} + +void ReadBmp(const std::string& inputBmpName, ImageInfo& imageInfo, Settings* s, std::vector& inputImage) +{ + int32_t begin, end; + std::ifstream file(inputBmpName, std::ios::in | std::ios::binary); + if (!file) { + LOG(FATAL) << "input file " << inputBmpName << " not found"; + return; + } + + begin = file.tellg(); + file.seekg(0, std::ios::end); + end = file.tellg(); + size_t len = end - begin; + if (s->verbose) { + LOG(INFO) << "len: " << len; + } + + std::vector img_bytes(len); + BmpAddressOffset bmpAddressOffset = { HEADER_ADDRESS_OFFSET, WIDTH_ADDRESS_OFFSET, + HEIGHT_ADDRESS_OFFSET, BBP_ADDRESS_OFFSET }; + file.seekg(0, std::ios::beg); + file.read(reinterpret_cast(img_bytes.data()), len); + const int32_t headerSize = + *(reinterpret_cast(img_bytes.data() + bmpAddressOffset.headerAddressOffset)); + imageInfo.width = *(reinterpret_cast(img_bytes.data() + bmpAddressOffset.widthAddressOffset)); + imageInfo.height = + abs(*(reinterpret_cast(img_bytes.data() + 
bmpAddressOffset.heightAddressOffset))); + const int32_t bpp = *(reinterpret_cast(img_bytes.data() + bmpAddressOffset.bbpAddressOffset)); + imageInfo.channels = bpp / BIT_TO_BYTE; + inputImage.resize(imageInfo.height * imageInfo.width * imageInfo.channels); + + if (s->verbose) { + LOG(INFO) << "width, height, channels: " << imageInfo.width << ", " << imageInfo.height << ", " + << imageInfo.channels; + } + + // there may be padding bytes when the width is not a multiple of 4 bytes. + // 8 * channels == bits per pixel + const int32_t rowSize = ((8 * imageInfo.channels * imageInfo.width + 31) >> 5) << 2; + + // if height is negative, data layout is top down. otherwise, it's bottom up. + bool topDown = (imageInfo.height < 0); + + // Decode image, allocating tensor once the image size is known. + const uint8_t* bmpPixels = &img_bytes[headerSize]; + DecodeBmp(bmpPixels, rowSize, imageInfo, topDown, inputImage); + return; +} +} // namespace label_classify +} // namespace tflite diff --git a/example/deep_learning_framework/tflite/tools/bitmap_helpers.h b/example/deep_learning_framework/tflite/tools/bitmap_helpers.h new file mode 100644 index 0000000..f8a8123 --- /dev/null +++ b/example/deep_learning_framework/tflite/tools/bitmap_helpers.h @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_BITMAP_HELPERS_H +#define TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_BITMAP_HELPERS_H + +#include "tflite/label_classify/label_classify.h" +#include "tensorflow/lite/builtin_op_data.h" +#include "tensorflow/lite/interpreter.h" +#include "tensorflow/lite/kernels/register.h" +#include "tensorflow/lite/string_util.h" +#include "log.h" + + +namespace tflite { +namespace label_classify { +const int INPUT_NUMBER = 2; +const int OUPUT_NUMBER = 1; +const int INT8_OFFSET_NUMBER = 128; +const int BIT_TO_BYTE = 8; +const int BLUE_OFFSET = 2; +const int GREEN_OFFSET = 1; +const int ALPHA_OFFSET = 3; +const int HEADER_ADDRESS_OFFSET = 10; +const int WIDTH_ADDRESS_OFFSET = 18; +const int HEIGHT_ADDRESS_OFFSET = 22; +const int BBP_ADDRESS_OFFSET = 28; +enum ChannelDim : int { + GRAYSCALE_DIM = 1, + BGR_DIM = 3, + BGRA_DIM = 4 +}; + +struct BmpAddressOffset { + int headerAddressOffset = 0; + int widthAddressOffset = 0; + int heightAddressOffset = 0; + int bbpAddressOffset = 0; +}; + +struct ColorChannelOffset { + int blueOffset = 0; + int greenOffset = 0; + int alphaOffset = 0; +}; + +struct ImageInfo { + int32_t width = 0; + int32_t height = 0; + int32_t channels = 0; +}; + +void ReadBmp(const std::string& input_bmp_name, ImageInfo& imageInfo, Settings* s, std::vector& input); + +template +void Resize(T* out, uint8_t* in, ImageInfo inputImageInfo, ImageInfo wantedImageInfo, Settings* s) +{ + std::unique_ptr interpreter = std::make_unique(); + + int32_t baseIndex = 0; + int32_t outputIndex = 2; + + // two inputs: input and new_sizes + interpreter->AddTensors(INPUT_NUMBER, &baseIndex); + // one output + interpreter->AddTensors(OUPUT_NUMBER, &baseIndex); + // set input and output tensors + interpreter->SetInputs({ 0, 1 }); + interpreter->SetOutputs({ 2 }); + + // set parameters of tensors + TfLiteQuantizationParams quant; + interpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "input", + { 1, inputImageInfo.height, inputImageInfo.width, inputImageInfo.channels }, quant); + interpreter->SetTensorParametersReadWrite(1, kTfLiteInt32, "new_size", { 2 }, quant); + interpreter->SetTensorParametersReadWrite(outputIndex, kTfLiteFloat32, "output", + { 1, wantedImageInfo.height, wantedImageInfo.width, wantedImageInfo.channels }, quant); + + ops::builtin::BuiltinOpResolver resolver; + const TfLiteRegistration* resizeOp = resolver.FindOp(BuiltinOperator_RESIZE_BILINEAR, 1); + auto* params = reinterpret_cast(malloc(sizeof(TfLiteResizeBilinearParams))); + if (params == nullptr) { + LOG(ERROR) << "Malloc memory failed in BitmapHelperslmpl."; + return; + } + params->align_corners = false; + params->half_pixel_centers = false; + interpreter->AddNodeWithParameters({ 0, 1 }, { 2 }, nullptr, 0, params, resizeOp, nullptr); + interpreter->AllocateTensors(); + + // fill input image + // in[] are integers, cannot do memcpy() directly + auto input = interpreter->typed_tensor(0); + + for (int32_t i = 0; i < inputImageInfo.height * inputImageInfo.width * inputImageInfo.channels; ++i) { + input[i] = in[i]; + } + + // fill new_sizes + interpreter->typed_tensor(1)[0] = wantedImageInfo.height; + interpreter->typed_tensor(1)[1] = wantedImageInfo.width; + interpreter->Invoke(); + auto output = interpreter->typed_tensor(2); + for (int32_t i = 0; i < wantedImageInfo.height * wantedImageInfo.width * wantedImageInfo.channels; ++i) { + switch (s->inputType) { + case kTfLiteFloat32: + out[i] = (output[i] - s->inputMean) / s->inputStd; + break; + case kTfLiteInt8: + out[i] = 
static_cast(output[i] - INT8_OFFSET_NUMBER); + break; + case kTfLiteUInt8: + out[i] = static_cast(output[i]); + break; + default: + break; + } + } +} + +// explicit instantiation +template void Resize(float*, uint8_t*, ImageInfo, ImageInfo, Settings*); +template void Resize(int8_t*, uint8_t*, ImageInfo, ImageInfo, Settings*); +template void Resize(uint8_t*, uint8_t*, ImageInfo, ImageInfo, Settings*); +template void Resize(int64_t*, uint8_t*, ImageInfo, ImageInfo, Settings*); +} // namespace label_classify +} // namespace tflite + +#endif // TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_BITMAP_HELPERS_H diff --git a/example/deep_learning_framework/tflite/tools/get_topn.h b/example/deep_learning_framework/tflite/tools/get_topn.h new file mode 100644 index 0000000..8e01f53 --- /dev/null +++ b/example/deep_learning_framework/tflite/tools/get_topn.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_GET_TOP_N_H +#define TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_GET_TOP_N_H + +#include +#include +#include + +#include "tensorflow/lite/c/common.h" + +namespace tflite { +namespace label_classify { +template +void GetTopN(T* prediction, int32_t predictionSize, size_t numResults, float threshold, + std::vector>* topResults, TfLiteType inputType) +{ + // Will contain top N results in ascending order. + std::priority_queue, std::vector>, + std::greater>> + topResultPQ; + + const long count = predictionSize; // NOLINT(runtime/int32_t) + float value = 0.0; + float intNormalizedFactor = 256.0; + float uintNormalizedFactor = 255.0; + uint32_t offsetNumber = 128; + + for (int32_t i = 0; i < count; ++i) { + switch (inputType) { + case kTfLiteFloat32: + value = prediction[i]; + break; + case kTfLiteInt8: + value = (prediction[i] + offsetNumber) / intNormalizedFactor; + break; + case kTfLiteUInt8: + value = prediction[i] / uintNormalizedFactor; + break; + default: + break; + } + + // Only add it if it beats the threshold and has a chance at being in the top N. + if (value < threshold) { + continue; + } + + topResultPQ.push(std::pair(value, i)); + + // If at capacity, kick the smallest value out. + if (topResultPQ.size() > numResults) { + topResultPQ.pop(); + } + } + + // Copy to output vector and reverse into descending order. 
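+    // Worked example (illustrative): with prediction = {0.1f, 0.7f, 0.2f}, numResults = 2 and a
+    // kTfLiteFloat32 input, the min-heap holds {(0.2, 2), (0.7, 1)} at this point; draining it
+    // produces ascending order, and the std::reverse() below yields {(0.7, 1), (0.2, 2)}.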
+ while (!topResultPQ.empty()) { + topResults->push_back(topResultPQ.top()); + topResultPQ.pop(); + } + + std::reverse(topResults->begin(), topResults->end()); +} + +// explicit instantiation so that we can use them otherwhere +template void GetTopN(float*, int32_t, size_t, float, std::vector>*, TfLiteType); +template void GetTopN(int8_t*, int32_t, size_t, float, std::vector>*, TfLiteType); +template void GetTopN(uint8_t*, int32_t, size_t, float, std::vector>*, TfLiteType); +template void GetTopN(int64_t*, int32_t, size_t, float, std::vector>*, TfLiteType); +} // namespace label_classify +} // namespace tflite + +#endif // TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_GET_TOP_N_H diff --git a/example/deep_learning_framework/tflite/tools/log.h b/example/deep_learning_framework/tflite/tools/log.h new file mode 100644 index 0000000..06bcf72 --- /dev/null +++ b/example/deep_learning_framework/tflite/tools/log.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_LOG_H +#define TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_LOG_H + +#include +#include + +namespace tflite { +namespace label_classify { +class Log { + std::stringstream stream_; + +public: + explicit Log(const char* severity) + { + stream_ << severity << ": "; + } + std::stringstream& Stream() + { + return stream_; + } + ~Log() + { + std::cerr << stream_.str() << std::endl; + } +}; + +#define LOG(severity) tflite::label_classify::Log(#severity).Stream() +} // namespace label_classify +} // namespace tflite + +#endif // TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_LOG_H diff --git a/example/deep_learning_framework/tflite/tools/utils.cpp b/example/deep_learning_framework/tflite/tools/utils.cpp new file mode 100644 index 0000000..ada2268 --- /dev/null +++ b/example/deep_learning_framework/tflite/tools/utils.cpp @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "utils.h" + +#include +#include +#include + +#include "tflite/tools/bitmap_helpers.h" +#include "tflite/tools/get_topn.h" +#include "tflite/tools/log.h" + +namespace tflite { +namespace label_classify { +constexpr int32_t DATA_PRINT_NUM = 1000; +constexpr int32_t DATA_EACHLINE_NUM = 1000; +constexpr int32_t SECOND_TO_MICROSECOND_RATIO = 1000000; +constexpr uint8_t WEIGHT_DIMENSION = 2; +constexpr uint8_t CHANNEL_DIMENSION = 3; + +double GetUs(struct timeval t) +{ + return (t.tv_sec * SECOND_TO_MICROSECOND_RATIO + t.tv_usec); +} + +TfLiteStatus ReadLabelsFile(const string& fileName, std::vector& result, size_t& foundLabelCount) +{ + std::ifstream file(fileName); + if (!file) { + LOG(ERROR) << "Labels file " << fileName << " not found"; + return kTfLiteError; + } + result.clear(); + string line; + while (std::getline(file, line)) { + result.push_back(line); + } + foundLabelCount = result.size(); + const int32_t padding = 16; + while (result.size() % padding) { + result.emplace_back(); + } + + return kTfLiteOk; +} + +void GetInputNameAndShape(string& inputShapeString, std::map>& userInputShapes) +{ + if (inputShapeString == "") { + return; + } + int pos = inputShapeString.find_last_of(":"); + string userInputName = inputShapeString.substr(0, pos); + + string dimString = inputShapeString.substr(pos + 1); + int dimPos = dimString.find(","); + std::vector inputDims; + while (dimPos != dimString.npos) { + inputDims.push_back(std::stoi(dimString.substr(0, dimPos))); + dimString = dimString.substr(dimPos + 1); + dimPos = dimString.find(","); + } + inputDims.push_back(std::stoi(dimString)); + userInputShapes.insert(std::map>::value_type(userInputName, inputDims)); +} + +TfLiteStatus FilterDynamicInputs(Settings& settings, std::unique_ptr& interpreter, + std::map>& neededInputShapes) +{ + std::vector inputIndexes = interpreter->inputs(); + std::map nameIndexs; + for (int i = 0; i < inputIndexes.size(); i++) { + LOG(INFO) << "input index: " << inputIndexes[i]; + nameIndexs.insert(std::map::value_type(interpreter->GetInputName(i), inputIndexes[i])); + } + + if (settings.inputShape.find(":") == settings.inputShape.npos) { + LOG(ERROR) << "The format of input shapes string is not supported."; + return kTfLiteError; + } + + // Get input names and shapes + std::map> userInputShapes; + string inputShapeString = settings.inputShape; + int pos = inputShapeString.find(";"); + while (pos != inputShapeString.npos) { + GetInputNameAndShape(inputShapeString, userInputShapes); + inputShapeString = inputShapeString.substr(pos + 1); + pos = inputShapeString.find(";"); + } + GetInputNameAndShape(inputShapeString, userInputShapes); + + for (const auto& inputShape : userInputShapes) { + string inputName = inputShape.first; + auto findName = nameIndexs.find(inputName); + if (findName == nameIndexs.end()) { + LOG(ERROR) << "The input name is error: " << inputShape.first << "."; + return kTfLiteError; + } else { + neededInputShapes.insert(std::map>::value_type(findName->second, inputShape.second)); + } + } + + return kTfLiteOk; +} + +template void PrintData(T* data, int32_t dataSize, int32_t printSize) +{ + if (printSize > dataSize) { + printSize = dataSize; + } + for (int32_t i = 0; i < printSize; ++i) { + std::cout << static_cast(*(data + i)) << "\t"; + } + std::cout << std::endl; +} + +void PrintResult(std::unique_ptr& interpreter) +{ + for (int32_t index = 0; index < interpreter->outputs().size(); ++index) { + int32_t output_index = interpreter->outputs()[index]; + TfLiteIntArray* outputsDims = 
interpreter->tensor(output_index)->dims; + int32_t dimSize = outputsDims->size; + int32_t outputTensorSize = 1; + for (int32_t i = 0; i < dimSize; ++i) { + outputTensorSize *= outputsDims->data[i]; + } + + TfLiteTensor* outputTensor = interpreter->tensor(output_index); + switch (outputTensor->type) { + case kTfLiteFloat32: + PrintData(interpreter->typed_output_tensor(index), outputTensorSize, DATA_PRINT_NUM); + break; + case kTfLiteInt32: + PrintData(interpreter->typed_output_tensor(index), outputTensorSize, DATA_PRINT_NUM); + break; + case kTfLiteUInt8: + PrintData(interpreter->typed_output_tensor(index), outputTensorSize, DATA_PRINT_NUM); + break; + case kTfLiteInt8: + PrintData(interpreter->typed_output_tensor(index), outputTensorSize, DATA_PRINT_NUM); + break; + default: + LOG(ERROR) << "Unsupportted tensor datatype: " << outputTensor->type << "!"; + return; + } + } +} + +void AnalysisResults(Settings& settings, std::unique_ptr& interpreter) +{ + const float threshold = 0.001f; + std::vector> topResults; + + if (settings.printResult) { + LOG(INFO) << "Outputs Data:"; + PrintResult(interpreter); + } + + int32_t output = interpreter->outputs()[0]; + TfLiteIntArray* outputDims = interpreter->tensor(output)->dims; + // assume output dims to be something like (1, 1, ... ,size) + auto outputSize = outputDims->data[outputDims->size - 1]; + + auto tfType = interpreter->tensor(output)->type; + switch (tfType) { + case kTfLiteFloat32: + GetTopN(interpreter->typed_output_tensor(0), outputSize, settings.numberOfResults, threshold, + &topResults, settings.inputType); + break; + case kTfLiteInt8: + GetTopN(interpreter->typed_output_tensor(0), outputSize, settings.numberOfResults, + threshold, &topResults, settings.inputType); + break; + case kTfLiteUInt8: + GetTopN(interpreter->typed_output_tensor(0), outputSize, settings.numberOfResults, + threshold, &topResults, settings.inputType); + break; + case kTfLiteInt64: + GetTopN(interpreter->typed_output_tensor(0), outputSize, settings.numberOfResults, + threshold, &topResults, settings.inputType); + break; + default: + LOG(ERROR) << "cannot handle output type " << tfType << " yet"; + return; + } + + std::vector labels; + size_t labelCount; + + if (ReadLabelsFile(settings.labelsFileName, labels, labelCount) != kTfLiteOk) { + return; + } + for (const auto& result : topResults) { + const float confidence = result.first; + const int32_t index = result.second; + LOG(INFO) << confidence << ": " << index << " " << labels[index]; + } +} + +void ImportData(Settings& settings, std::vector& imageSize, std::unique_ptr& interpreter) +{ + ImageInfo inputImageInfo = {imageSize[0], imageSize[1], imageSize[2]}; + std::vector in; + ReadBmp(settings.inputBmpName, inputImageInfo, &settings, in); + + int32_t input = interpreter->inputs()[0]; + if (settings.verbose) { + LOG(INFO) << "input: " << input; + } + + // get input dimension from the model. + TfLiteIntArray* dims = interpreter->tensor(input)->dims; + ImageInfo wantedimageInfo; + wantedimageInfo.height = dims->data[1]; + wantedimageInfo.width = dims->data[WEIGHT_DIMENSION]; + wantedimageInfo.channels = (dims->size > CHANNEL_DIMENSION) ? 
dims->data[CHANNEL_DIMENSION] : 1; + + settings.inputType = interpreter->tensor(input)->type; + switch (settings.inputType) { + case kTfLiteFloat32: + Resize(interpreter->typed_tensor(input), in.data(), inputImageInfo, wantedimageInfo, + &settings); + break; + case kTfLiteInt8: + Resize(interpreter->typed_tensor(input), in.data(), inputImageInfo, wantedimageInfo, + &settings); + break; + case kTfLiteUInt8: + Resize(interpreter->typed_tensor(input), in.data(), inputImageInfo, wantedimageInfo, + &settings); + break; + case kTfLiteInt64: + Resize(interpreter->typed_tensor(input), in.data(), inputImageInfo, wantedimageInfo, + &settings); + break; + default: + LOG(ERROR) << "cannot handle input type " << settings.inputType << " yet"; + return; + } +} + +bool IsEqualShape(int tensorIndex, const std::vector& dims, std::unique_ptr& interpreter) +{ + TfLiteTensor* tensor = interpreter->tensor(tensorIndex); + for (int i = 0; i < tensor->dims->size; ++i) { + if (tensor->dims->data[i] != dims[i]) { + return false; + } + } + return true; +} +} // namespace label_classify +} // namespace tflite \ No newline at end of file diff --git a/example/deep_learning_framework/tflite/tools/utils.h b/example/deep_learning_framework/tflite/tools/utils.h new file mode 100644 index 0000000..f97a883 --- /dev/null +++ b/example/deep_learning_framework/tflite/tools/utils.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_UTILS_H +#define TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_UTILS_H + +#include "../label_classify/label_classify.h" + +#include "sys/time.h" + +#include "tensorflow/lite/model_builder.h" +#include "tensorflow/lite/string_type.h" +#include "tensorflow/lite/c/c_api_types.h" +#include "tensorflow/lite/interpreter.h" + +#include "neural_network_runtime.h" + +namespace tflite { +namespace label_classify { +double GetUs(struct timeval t); +TfLiteStatus ReadLabelsFile(const string& fileName, std::vector& result, size_t& foundLabelCount); +TfLiteStatus FilterDynamicInputs(Settings& settings, + std::unique_ptr& interpreter, std::map>& neededInputShapes); +bool IsEqualShape(int tensorIndex, const std::vector& dim, std::unique_ptr& interpreter); +void GetInputNameAndShape(string &inputShapeString, std::map>& userInputShapes); +void PrintResult(std::unique_ptr& interpreter); +void AnalysisResults(Settings& settings, std::unique_ptr& interpreter); +void ImportData(Settings& settings, std::vector& imageSize, std::unique_ptr& interpreter); +} // namespace label_classify +} // namespace tflite + +#endif // TENSORFLOW_LITE_EXAMPLES_LABEL_CLASSIFY_UTILS_H diff --git a/example/drivers/README_zh.md b/example/drivers/README_zh.md new file mode 100644 index 0000000..0848080 --- /dev/null +++ b/example/drivers/README_zh.md @@ -0,0 +1,329 @@ +# NNRt开发指导 + +## NNRt开发概述 + +### 功能简介 + +神经网络运行时部件(NNRt)是跨设备的AI运行时框架,作为端侧推理框架和专用加速芯片的中间桥梁,为端侧推理框架提供了统一的Native接口,使能端侧推理框架在专有加速芯片上推理;为芯片厂商提供了统一的HDI接口,使能专有加速芯片接入OpenHarmony社区生态。 + +本文介绍芯片厂商如何在将专有加速芯片接入NNRt,接入OpenHarmony社区生态。 + +### 基本概念 +在开发前,开发者需要先了解以下概念,以便更好地理解全文内容: + +- NNRt:Neural Network Runtime,神经网络运行时,是本指导主要介绍的部件。 +- OHOS:OpenHarmony Operating System,开源鸿蒙操作系统。 +- HDI:Hardware Device Interface,硬件设备接口,是OHOS中系统组件与芯片组件通信的接口。 +- IDL: Interface Description Language,接口描述语言,是HDI接口的语言格式。 + +### 约束与限制 +- 系统版本:OpenHarmony 3.2及以上。 +- 开发环境:Ubuntu 18.04及以上。 +- 接入设备:OpenHarmony定义的标准设备。 + +### 运作机制 +NNRt通过HDI接口实现与设备芯片的对接,由HDI接口实现跨进程通信。 + +**图1** NNRt架构图 + +![架构图](./arch_diagram.png) + +整个架构主要分为三层,AI应用在应用层,AI推理框架和神经网络运行时在系统层,设备服务在芯片层。AI应用要在专用加速芯片上完成模型推理,需要经过AI推理框架和神经网络运行时才能调用到底层的芯片设备,而神经网络运行时就是负责适配底层各种芯片设备,它开放了标准统一的南向接口,众多的第三方芯片设备都可以通过HDI接口接入OHOS。 + +程序运行时,AI应用、AI推理框架、神经网络运行时都在同一个进程,底层设备服务在另一个进程,进程间是通过IPC的机制通信,神经网络运行时根据南向HDI接口实现了HDI Client,服务端也需要根据南向HDI接口实现HDI Service。 + +## NNRt开发指导 + +### 场景介绍 +下文以rk3568芯片为例,展示rk3568 CPU如何通过HDI接口接入NNRt,并完成AI模型推理。 + +### 开发流程 +适配操作的整体流程如下: + +**图2** NNRt适配流程 + +![开发流程](./dev_flow.png) + +### 开发步骤 +开发者具体可通过以下步骤在芯片侧对接NNRt: +1. 
开源社区下载OpenHarmony的代码,编译drivers_interface部件,生成HDI接口的头文件。 + - [下载源码](../get-code/sourcecode-acquire.md)。 + - 编译接口IDL文件。 + ```shell + ./build.sh --product-name rk3568 –ccache --target-cpu arm64 --build-target=drivers_interface_nnrt + ``` + + --target-cpu arm64:是64位编译选项,若编译32位,则不需添加--target-cpu arm64 + + 编译之后,可以在```out/rk3568/gen/drivers/interface/nnrt```目录下找到生成的头文件,默认生成C++头文件,若需要生成C头文件,则修改```drivers/interface/nnrt/v1_0/BUILD.gn```文件中的language。 + ```shell + language = "c" + ``` + + 生成头文件目录如下所示: + ```text + out/rk3568/gen/drivers/interface/nnrt + └── v1_0 + ├── drivers_interface_nnrt__libnnrt_proxy_1.0_external_deps_temp.json + ├── drivers_interface_nnrt__libnnrt_stub_1.0_external_deps_temp.json + ├── innrt_device.h # 设备接口头文件 + ├── iprepared_model.h # 编译AI模型对象头文件 + ├── libnnrt_proxy_1.0__notice.d + ├── libnnrt_stub_1.0__notice.d + ├── model_types.cpp # AI模型结构定义实现文件 + ├── model_types.h # AI模型结构定义头文件 + ├── nnrt_device_driver.cpp # 设备驱动实现参考样例 + ├── nnrt_device_proxy.cpp + ├── nnrt_device_proxy.h + ├── nnrt_device_service.cpp # 设备服务端实现参考样例 + ├── nnrt_device_service.h # 设备服务端头文件 + ├── nnrt_device_stub.cpp + ├── nnrt_device_stub.h + ├── nnrt_types.cpp # 数据类型定义实现文件 + ├── nnrt_types.h # 数据类型定义头文件 + ├── node_attr_types.cpp # AI模型算子属性定义实现文件 + ├── node_attr_types.h # AI模型算子属性定义 + ├── prepared_model_proxy.cpp + ├── prepared_model_proxy.h + ├── prepared_model_service.cpp # 编译AI模型对象服务端实现参考样例 + ├── prepared_model_service.h # 编译AI模型对象服务端头文件 + ├── prepared_model_stub.cpp + └── prepared_model_stub.h + ``` + +2. 实现HDI服务 + - 在drivers/peripheral目录下新建开发目录,用于HDI服务开发,开发目录结构如下所示。 + ```text + drivers/peripheral/nnrt + ├── BUILD.gn # 代码编译脚本文件 + ├── bundle.json + └── hdi_cpu_service # 自定义目录 + ├── BUILD.gn # 代码编译脚本文件 + ├── include + │   ├── nnrt_device_service.h # 设备服务端头文件 + │   ├── node_functions.h # 非必须,由具体实现决定 + │   ├── node_registry.h # 非必须,由具体实现决定 + │   └── prepared_model_service.h # 编译AI模型对象服务端头文件 + └── src + ├── nnrt_device_driver.cpp # 设备驱动实现文件 + ├── nnrt_device_service.cpp # 设备服务端实现文件 + ├── nnrt_device_stub.cpp # 非必须,由具体实现决定 + ├── node_attr_types.cpp # 非必须,由具体实现决定 + ├── node_functions.cpp # 非必须,由具体实现决定 + ├── node_registry.cpp # 非必须,由具体实现决定 + └── prepared_model_service.cpp # 编译AI模型对象服务端实现文件 + ``` + + - 实现设备驱动,无特殊需求可直接使用步骤1中生成的nnrt_device_driver.cpp文件,否则根据具体驱动开发。 + - 实现服务接口,主要实现nnrt_device_service.cpp和prepared_model_service.cpp文件,接口定义可以参考```drivers/interface/nnrt```。 + + - 编译驱动和服务实现为共享库。 + 在```drivers/peripheral/nnrt/hdi_cpu_service/```下新建```BUILD.gn```文件,对驱动入口和服务实现编译为共享库。 + + ```shell + import("//build/ohos.gni") + import("//drivers/hdf_core/adapter/uhdf2/uhdf.gni") + + ohos_shared_library("libnnrt_service_1.0") { + include_dirs = [] + sources = [ + "src/nnrt_device_service.cpp", + "src/prepared_model_service.cpp", + "src/node_registry.cpp", + "src/node_functions.cpp", + "src/node_attr_types.cpp" + ] + public_deps = [ "//drivers/interface/nnrt/v1_0:nnrt_idl_headers" ] + external_deps = [ + "hdf_core:libhdf_utils", + "hiviewdfx_hilog_native:libhilog", + "ipc:ipc_single", + "c_utils:utils", + ] + + install_images = [ chipset_base_dir ] + subsystem_name = "hdf" + part_name = "drivers_peripheral_nnrt" + } + + ohos_shared_library("libnnrt_driver") { + include_dirs = [] + sources = [ "src/nnr_device_driver.cpp" ] + deps = [ "//drivers/peripheral/nnrt/hdi_cpu_service:libnnrt_service_1.0" ] + + external_deps = [ + "hdf_core:libhdf_host", + "hdf_core:libhdf_ipc_adapter", + "hdf_core:libhdf_utils", + "hiviewdfx_hilog_native:libhilog", + "ipc:ipc_single", + "c_utils:utils", + ] + + install_images = [ chipset_base_dir ] + 
subsystem_name = "hdf" + part_name = "drivers_peripheral_nnrt" + } + + group("hdf_nnrt_service") { + deps = [ + ":libnnrt_driver", + ":libnnrt_service_1.0", + ] + } + ``` + + 将```group("hdf_nnrt_service")```添加到```drivers/peripheral/nnrt/BUILD.gn```文件中 + ```shell + if (defined(ohos_lite)) { + group("nnrt_entry") { + deps = [ ] + } + } else { + group("nnrt_entry") { + deps = [ + "./hdi_cpu_service:hdf_nnrt_service", + ] + } + } + ``` + + 新建```drivers/peripheral/nnrt/bundle.json```用于定义新增的```drivers_peripheral_nnrt```部件。 + ```json + { + "name": "drivers_peripheral_nnrt", + "description": "Neural network runtime device driver", + "version": "3.2", + "license": "Apache License 2.0", + "component": { + "name": "drivers_peripheral_nnrt", + "subsystem": "hdf", + "syscap": [""], + "adapter_system_type": ["standard"], + "rom": "1024KB", + "ram": "2048KB", + "deps": { + "components": [ + "ipc", + "hdf_core", + "hiviewdfx_hilog_native", + "c_utils" + ], + "third_part": [ + "bounds_checking_function" + ] + }, + "build": { + "sub_component": [ + "//drivers/peripheral/nnrt:nnrt_entry" + ], + "test": [ + ], + "inner_kits": [ + ] + } + } + } + ``` + +3. 声明HDI服务 + 在对应产品的uhdf hcs配置文件中声明用户态驱动与服务,本例中rk3568对应在```vendor/hihope/rk3568/hdf_config/uhdf/device_info.hcs```文件中新增如下配置: + ```text + nnrt :: host { + hostName = "nnrt_host"; + priority = 50; + uid = ""; + gid = ""; + caps = ["DAC_OVERRIDE", "DAC_READ_SEARCH"]; + nnrt_device :: device { + device0 :: deviceNode { + policy = 2; + priority = 100; + moduleName = "libnnrt_driver.z.so"; + serviceName = "nnrt_device_service"; + } + } + } + ``` + 注意:修改hcs文件后请删除out目录重新编译,才能生效。 + +4. 配置host进程用户和组 + 对于新增host进程的场景,需要新增配置对应进程的用户ID和组ID。 进程的用户ID在文件```base/startup/init/services/etc/passwd```中配置,进程的组ID在文件```base/startup/init/services/etc/group```中配置。 + ```text + # 在base/startup/init/services/etc/passwd新增 + nnrt_host:x:3311:3311:::/bin/false + + # 在base/startup/init/services/etc/group新增 + nnrt_host:x:3311: + ``` + 完成上述所有配置后,全量编译版本后应该可以观察到新增host进程启动,也可以通过hilog输出检索新增的服务名称nnrt_interface_service观察到服务发布成功 + +5. SELinux配置 + OHOS已经开启SELinux特性,需要对新增的进程和服务配置相应的SELinux规则,用于运行host进程启动访问某些资源、发布HDI服务。对于调用者来说,也需要配置SELinux规则运行获取和调用某个HDI服务。 + + 在```base/security/selinux/sepolicy/ohos_policy/drivers/adapter/vendor/type.te```文件中配置nnrt_host进程安全上下文 + ```text + # 新增 + type nnrt_host, hdfdomain, domain; + ``` + + 由于SeLinux是白名单访问的权限机制,需要根据实际权限需求配置,将服务启动起来之后,通过以下dmesg命令可能查看avc告警, + avc告警会给出缺少的权限,SeLinux的配置也可以参考[OpenHarmony SeLinux子系统的说明](https://gitee.com/openharmony/security_selinux/blob/master/README.md) + ```shell + hdc_std shell + dmesg | grep nnrt + ``` + + 新建nnrt_host.te配置文件,将权限配置到nnrt_host.te文件中 + ```shell + # 创建nnrt文件夹 + mkdir base/security/selinux/sepolicy/ohos_policy/drivers/peripheral/nnrt + + # 创建vendor文件夹 + mkdir base/security/selinux/sepolicy/ohos_policy/drivers/peripheral/nnrt/vendor + + # 创建nnrt_host.te文件 + touch base/security/selinux/sepolicy/ohos_policy/drivers/peripheral/nnrt/vendor/nnrt_host.te + ``` + + 然后再将所需的权限写入nnrt_host.te文件中,比如: + ```text + allow nnrt_host dev_hdf_kevent:chr_file { ioctl }; + allow nnrt_host hilog_param:file { read }; + allow nnrt_host sh:binder { transfer }; + allow nnrt_host dev_ashmem_file:chr_file { open }; + allow sh nnrt_host:fd { use }; + ``` + +6. 删除out目录编译整个系统 + ```shell + ./build.sh --product-name rk3568 –ccache --jobs=4 + ``` + + +### 调测验证 +服务开发完成后,可以使用XTS用例验证基本功能和兼容性,开发者可通过以下步骤进行验证: +1. 开源社区下载[OpenHarmony代码](https://gitee.com/openharmony/docs/blob/master/zh-cn/device-dev/get-code/sourcecode-acquire.md),相关用例在test/xts/hats/hdf/nnrt目录下。 +2. 
编译XTS用例。 +```shell +cd test/xts/hats +./build.sh suite=hats system_size=standard --product-name rk3568 +``` +编译好的测试用例会输出到out/rk3568/suites/hats/testcases/HatsHdfNnrtFunctionTest + + +3. 将测试用例push到设备上。 +```shell +# 将测试用例可执行文件推送到设备上,HatsHdfNnrtFunctionTest是测试用例可执行文件。 +hdc_std file send out/rk3568/suites/hats/testcases/HatsHdfNnrtFunctionTest /data/local/tmp/ + +# 给测试用例可执行文件加上权限。 +hdc_std shell "chmod +x /data/local/tmp/HatsHdfNnrtFunctionTest" + +# 执行测试用例 +hdc_std shell "/data/local/tmp/HatsHdfNnrtFunctionTest" +``` + +### 开发实例 +完整[Demo实例](xxx, Demo暂时还在黄区代码仓,超链接需等Demo开源后补充)可以参考社区实现。 + diff --git a/example/drivers/arch_diagram.png b/example/drivers/arch_diagram.png new file mode 100644 index 0000000000000000000000000000000000000000..2e4b44da94089ec9ebd22d3985ad8435b9cb9d2d GIT binary patch literal 8746 zcmdsd2UJsAx9(18QCd8LNRyzP14pU_keblM9o~#XGLc>4fI(07uu#3YKN>>lH6O*w|PV z=-+6f-*&53cJ0hP*-Zg>XWD$as(M@M?JhrZ$7v}mo4aQng&G~vbzSS;(k@x()BE!3qHYPXAs5irpx-FT5bVOOF`QC)z`$v~w@L znP{a%a?nIQR;r&VzS4UaE?a}yR6aY;g!|QNX1&mm_Y-NcKmeb-D)D{Bb$D_Z+pi=^ z|EqY9XBQpME-XDUMz!A#9W<>Yfl@M$*CfyH>kZliE4C06gtl&p?@(RK>>0#jl$(^oDT=1h&(*kY^d!APg)K-Dq2;X+A>>%c$jyZ6-B?5?&5uOIWb#yjksF@>z zhf+jZ)L_}aht#>Z8jIwBL7@0`AEL`i#8R~B#rngmbrj0@BRc3M$NK{e77!{Z_)!jD zr*dP+Xx(SrwKy4Pds>;xHCyw^wN%=D6dIEf_~n+KxMmK|K0qY|0uU1aOB$z4Va_=A z;y9eRu^m7(IW3J}VvGuyr+-=>+ysETEmmk}oegmnZtRQSFs|0;%-^RuMhBY+BF+p&kZ(Pn>PX>(Vtvc%Y987CkeH_MYwBU zoVc2%JLtc;MDPVIR#z76jhgKj3I*LZEiy-ZMet>HQQcUX9(NKBT1S`H{sQyJT4L-O zyg_;xp!R)?D1|9GdX~HD^Yjej$s?T^i(N*XAqlXEIbXrPQi`z{o5-!MI{M{Tp_nZ~ zTL_g|7}dW_?X#vMnLPVdOUTlWCK_wA^IH(Z_Lu>0oHEa1QGQKxEa4D2h%+J>qcC}q z5Xok4Wa5_Z`lm9YjUY3ke0%y@3onE|%pfH?gdv}(`GUss059J55Yz`rY? zKIEf>hV^|d2D!#nu9?Kl)jMCM5C{!8D9=?5dLDDhaciOWa?r7JlEYHK2{kJ-08>~3(Q z0;g&+PHP|V+7-Et^+`4>+CYNfw+%X_A~2jtbqyGgK7aE@{T7Jq@Z>^pLB2-=b>ef# z0`y2lmc#f*nv3`w-P-a5?AWO8bUl460&3A3fxI4Y>L|0qg!&QQCnNt-eq{z$O(YK~ zob6vrf-8ubh&S)T-kU8gyQH1SF>w`)F?>fzQTkR&8w|h~Q1&aDuee1#?*9cl^DSCM z5^}jeC%^_CPWNNpASmXR__6GgN)rhWx2YLH+0LQJjxo*_2sXLpRi7z+@{ua@^XhV( zX`WI*$t7KoGA57GepQik0Kkc`8XUdjVb@v_N= z;r#t97}z|^0TH9L5pX-0D7WZ(sJfv=vfk-#e}1Sfets;vj}xRvqZ|^wT1g( z$;Z8Ei%l*^P!UjJ*Xnj(J1LsFv8~QhKLakNKKm*R-4Zd8d6w;LOUzh8io6Iz)lAIJ zO()4_XLQ?E+zP@y_S=fpyu;;KRKHH1D>YVy?QQSkt{K?y@Rvu3r-X zcvAbm2O*T5i2Twlaw7$sM0_Cnf)lQy)m)*(BrQ24&|#&uqQ+vPm+_47@w1nk6*El_ zbbHhkYhxvYbuvOS2|5DCTjE>WPYzDZU|C@EHflefR|UKN6(!&O zmT#+q@tnywkLSM@4A*79gj{G;3p8B*)L`WSH1=FUT+U@?jh*RJ2?NjliEqw`q$t7DjXvbTxu*HMLz^f zps0VsSBp}A>kARMvYyzh+wrJ(+WB~Is?6+HiRqYScjm&fn`CVWb)lY0yOf+^tY_hO z649CIs0Y-H^v!-&p437P_!xO)Ey`z|?V#-~;o`tN$f3Tw^3#RFlG$UaIaq(Y|cu#DOMoa1b6Q+1%4 z2jv%AW04;JeiwW9IGJj$6t<F}waVh{|8H*x%(!_B?Y#AsqyK zd;bTI0s}gdyu22`qfP*iv;J9w)+8dhc}EV=*!a2Zj%VnmqY6-b?t5a}0|Q)K|5;KM z?}r_XoMS(?FLX$*|8Sp|kWwaWlQU$-*DMkz<_|Bv!3-?8hl=a!nb1Z<Z#k)5wDtD7B2<61v~`tg?IL+i()Dwjkwetn6Dn&Oi_0o3s3$WZF_Lyp7PkqFWo< zIXe*w-aXljQPREas+8yK$5>%URq;jk_1U>!a#sjZDvoqzJxW^Su&S&j%9xQI!F@bF zUChv_tQoo^_F6kQW%AXGX{`PzO{lS)czD)``%PygxkznUc@P4+~YeGC^m+yQ$g=jG#5>dnr4E~sO zFS$!{c#7cFZdEaRmt!LlKe}8{dW}3e5-52!kF_%c#y3}?t6iGwg!uLK&sWrOaO}zq zM;vTUqhW8}6Zyy{cqyS5<$9@sI>|SD_cxAwJ&wZIkq7bNMe_<@mX!@?Hrgf>+!=0I z;wM}Q>k0F`@!+~s7H-retNSV3AD3B=9AkkN`GmXsf}T^~#BgkN_)@pT^^M$J$3@22 zgd=g6FRz}<{vh%(w5Cl0Wl_^MiV*8-4qU3zJ6OG3<1R@pBeF+KYuxX$b;NqT4v8SY z41Ov`7e*k5^^QWvNca*ZhC8~O8nQ&+J0Q|x&0~aajswMVH;$;v7d7eZV<(y_DFjwZ z2|xDN-}TEHZXrr;c24*8?*jWw^T{KSJ_)KA>O~{>Ryb@gC#BvW-JL8(+NU%=w%F7* zRxaQ+jBSYg?uhd{FK`LRwAWoLch2wD@3T7_{c5w*h)5LHjCve8CBRlQZ!Yg>Xpi^j1ICJAfELJZSCJs9ZXFi>}2hMx3A9!v|L3%@{xj#r~bR>_~GTF0hp zw@9Lk(ic}RCmj3n%yd+SkY&BIjNF6SBTC*_#1Y_iUlyv&TIpC4P0=3?+UAVBc48_t z@w+^Hm&DG76?X=gxq 
z|8!+7W1~;AVnV87%P#^6hBtobKW=MyT2UAD$ik?|P!>viFpHbDJu|Xb$a05gTZxUD z3Py}>_Lr&E<*SdLA2d_g3p?ERbb?WvR6JX33JP@iVZm0CqM+r9?ne)V$25CP#PSev ziE%g&bUU-i|2QJk`>*1qOiVKlKKVeE_a2tWJ@j=;{2vC_C6{lB_wT#4neMZbu6OQ< zM-Agy2NPCJD(S$YP`|sAB;%nvJymC@j^Dk}x!$V*TPIF&6i4iLN0*jwrrg0-*6mg0e4D&*XGdFXD%jlh~`A-;Ts z+gP6;BUH|0d-*`37j?Lwp*+eJ5)@*f2qkl8(f#-*L-QXTNsa30x}Qk3%g1ULzNr6a zcHAlQ2IpQ>$K6vw4mjyfxMnQ$U$>>3@#=hv=UXeIE9+<84zEAISX0kex8U*ftB9K+ z({!gSO!5|f$JwF}gd-H#fZ0-Cr}Q8Cv5X*&6x&l_M_zJ&*0tiPtf8w4>z!imzPhe! z%Nw)(SsG~D-n!J&+sO}Le`d?Sn7MpwJk2?xFl_9eEdWHo4y@?;ydb2=%uyua0I-7a zoblViLjZ;kgFh!Zg4!9XtUGv>VCK=qg|&Bc-IZS1m;2!4(7Y=RFhp4yzj z7mglb?ufyN+&13TZx&;8Q@NlT6+j&m6kM^!;n1ZJ2z^^qK0Zi9nfN6n%@`gS=R(=z zU6CagujSE!c+W3!MkBHhD0jZ7j0Wv(YVZ7R6}P>x-if8@oM9FxZ%)TG#{)G~rKPZN zT2`nrwXz74-=HYC!Ykca$_a-dVul7KW<}vD)yTzIBx4dcGIFh7wC5KrPLo$~;+16z zSD%^SLbjWB#kponZZgi|H;oe9Tr97J=9A&Y)6z@IcLdGw&O&R7qB^PT#{E1cfdK3O zfInwNBGwlqnYx?n%uV6HD+etzH--M__$O&}l$;W|&fA$g7aqze)z9eizLZrr7@6j~ z9lJJ!%;@SP-pp32@gFc~UYSr-4J}jdnO^TAA8n{ ztKDx_J7`Lj!1tgh+g!Y5Pse?q?4c1vc62R2M7CuhXtKCXNj2&t{M6gWlJQ7$wRdwz zH;wBZfBn|3XqWu`?nZ*wW`r`ueiw`4ER0`6L#qU8Y#~q{K2f5R`-ABFD>)POiU@KEDeEC5+BiuYJk<3p3RZR3?g-dsHA15sovf9;WT%&;KUJ4-Pk_E`Nv5%X}4}ZK}(k8j$VHMpQ->4qk#?Pghr5I%|Pnc-Z;s z2dh1gGJ?eWLXg5S=N6>TI<@134t~3T*)`{0HqpQ{-1ISYr#Jg=ZtfF_F-BqXZ=Vm> zzdZoD`9U(?KSFlnsz@sR=O>`su@9a#{fz>C+oApCYpP341h2S-;^yEv4C-uN-$m5# z+#TTErh`Qfvh&4-OsEGTy<`1M*wdt=ijIHSvrGJAFQH(~(334;E^>`d^(vNZzG0O| z@H$Q|+TWa6BOJuAw_Bw?N%~X%{V(BzPw5hogW?<^f<@}uNNe87_qf`yw0|;4{weX} zMdMgc05JSNm3aW*ZyNsl+mL_kP^v0H*Db0_dhr^mkMl1r1+&Ii&V8hnq}OLhB982- z#d{~Q@&3fWzw{GSRFC!ce`}x7+*lmk@4}0&uQS~7W{)*0TDAE)QI8q9{Q*j!nOT@_ z3$JC+lHcM=v9ld?Q8dcr7PflzW%0}`a>hN}!H=#@hL3~{`kHEKZNDke_sW#izt`@;<#2S}@#;s7`76nmXU#&{!m|Hkz!20>ZKd@QtYLbjMf)F{nRtv7I6}(e}G|2 zU9kv1gtgY`XK^3)oqffu-&LAk{}VI_yNEM-Kjlx7^$c+%RwMJuRZDIX9-`^mmvBim zNzPbpG0km z@A*JHGa(p*IZkFK1~OtRNZH{e{iUahE~N6hfrGGblQ~P($FWnFKqbtLu)j5=-rIL0 zPA>Aomh|MRR!NQ;wxsV?mCQhW_!O&YvpPE4-;+*a2qdkgp9{a=e=*jm=p^nw%jLTx z4S(gzYH)k{7>Mr<)pYi{z#g}~c#dsxV8)#seWImqZl&w98_^A|w*_K`jOZXelFVvy zX59gS%CH;FWAZ_YV;3I6^> z-?{zEqkjw-05Gl$0{|`F7W7wMIps&gaEcH*TSj{;3IH?NyuVDmc)EtS&bR_bwyaA6 Pyw6dqQMF4zAJ*{^k4 literal 0 HcmV?d00001 diff --git a/example/drivers/dev_flow.png b/example/drivers/dev_flow.png new file mode 100644 index 0000000000000000000000000000000000000000..1e75ccb0abedf661e64b08c791831308cf84c02c GIT binary patch literal 6014 zcma)AXE>bQ(^sRc#9B41-o=W9=)LzI(V|9ASR$gcSiKX`>mrD@2%;?!q7%JD@6mhT z|31(Edaw7(`{Dg?&YAO@nKN_GJu~ON?noUi<%h(y#8_BZ4^>qZ^e}5M78dpyAueY2 ziP}iP0NC&Kl;yB0hrrvI4W7NMrYsg#O+4V%3LnFJp`&4-h{2&y=-%GmGXX&Z0|QrA z*ZuwdY(z;xK|w@Z;(IS247azp_weu#!@|&*nKOK*zrX+a^XD8f)66Wa9AVQ=?p`G& zB|}3)HujEZXJ?#YGZ?Y>Pia6PkS${N;^Jat=FjNpCs)xUmdGzB{MUm#NkjRs?=!YVXL0~&4Cjz+&hqnpv_ zA9Lto^i&8M4Glq4EN-6sBOOF?!-eKYU!cp;XhJ+RS_2KN{RcPDVMC*VaAo6Yj7cJ> zit&3aENZfU26i9Ei8K}#rG=`3tbv#LUiQQG%wU)i)(xJ8OYeG3;v9VFKR`Gi zWorJ31!W+(TqetBxtfmuAR0@17SXJ`-a}w>d^zjJjg9-uxmPF$4=)jc0Xp0mrA!tI z{wK&@IS!vhfbsTEyHm3&L;|zbll~ZaIjKZ;e`<;u_lSnNmb6q|+Ldxt^r|8soA$SG zPapSo>S&^uH@}(N+$A}y-!ng^&{NvANMZQtbBj`!KQ@4~flD}-C5@4CifF=h0_WU| zdIg$8T-(j_O5CS%!wtwclK96r$V-8Tk=A|MBL+pvzajFwjOEZ=22EPIM{MEi@_J=* zpt!O32IL#F-!1brE>~EEXy9y5d*cK6N*d?1&XHj@&fmswUnx#dv(5dTuD>W34 zbY!>2STt}_qlTDye5xyK*2vTe4)4f~XB6KSjpKV6^j(!!#VM9IZ@t?nF{v>=7^8{qiJTWE~ zf2L(${wakmgP4w8=YCJwFQHHrGS|gLeQ4`h9`wA4rmy%UViq^1XOmx~Pw63LpRKX> zlR!bY%bIG&FIbWDzkYO^d_HcXu?limZXorWtN(0v3u?V8Y!f3#DtUBv+%M_HWQ-$a z1CwNK3YBS`~grQIk3Ye__uRRKsM7K3ijKPoG(W^gYj$`dhaCn2`9NM~QGkM4R>%r<=-= 
zgYLdgOp`dNwf)y0J}1ilH_E@nUJ|Xtf}T^paQ__+lu!{TAyp!yc`WA`L~n?MXs$vLOpff87`vEiblmB5^9>Sh@n^wK=3-&lcPZ5jH2b z{FFMjV*4VaHt@0Nl+!j}CylKXH76X<%tHULpPur6=4Ll zNgvJrWzzNfa}=j$Y=}1P$q%jf>SbLf3!&wZ z9XCtfa0bch%4BGN5iOxYF2Gcdlp!m_>OY5@vGO^1^vtSEc_NY#gDlWgChPqJE=Q*| zPqNt*b;3R&)iHbzP9&AvRM83xae`78WBnGP0|!Tmw0vn1bViota{Oa0iwTq?P48{g zlT{lqY&DKj%nj$mhn^buNFb#X7?8G2XIZy}P_H;i<{9(J zK#w5MQ3+SfE?wk#Qy?Na05s7e+1Z0yr*`-^ygI)tb1LkFdr=JMPH7GglY7du-C|>+ zFXRO>N1X-8105L@pH-_m8SJoK|RM+;l{CWa~b()RdeHevztT}F+^zBZMN zX2*N`P(I6l+i~)zUN5l%=i{~l1E2KHGrs5Q-PtG?Bv8cVt6y8O7WzFEjxyiO3ll<# z9mvt4Qhs^oyO)g5Gaz7fPAZMWEaTyqT#RDXGL>R&<>pJ0Cm_3v*>~yciJZ0-kY=h> zk?2o-v3C4w+kl9gfg9DC8sPPHgG38lRo272hr#$0bUl`i7f(M5Cr-pRkGFT@zWC*NcFn47D6o$^ za2YLultkMO@cV(QrM+uR#c!G4iI3hVxqVzB_;$dzXIa|0wi69im2iw&8}O-=yBdMv zyH5-k;9IMua=y;e9_Nq0TvH#Gp{{_{ta96q^#&77pZ0K8bJSWc6BEAdH*jWk-v-BR zXBF1Q_0We9QY8_JJ`_P0TBh!=3r#sU~EPFlN?RrgKjAZUrMcF-+ zFmDg%0EoEgKh#XX*Jrbze9zuT@E)+}(jfloa~#6k(}m%I|Aej+X==sL@r=V9<-Uzw zzGBVXR?QyE;nJ-s6RAt)DA#*MA(O-KL1`{^$~DRwEf<#cAJG&Fxj^$UR!mln2lP8=! z{TG}5tr6ism`0J56YPBP#Bckb1Y}qo=2Smh(KyoU)G- zDfL2Q4JCrpcZHx7Ut%B;0t8Go0QBoTc|)&r>?w`-guSMNm8qZ&tQ>{a*%HkGD85}$ z>5rPcOI+Ou*jsp=o>&YLzn`?R23ZG{dR3b_9p+Lr5S%D+d_(m!+&eQf4sbMVNNMDH&I12LxEQ?){C^*kKN*#*uLr=N;Q#c%cbX zT8+sh)C_Gj)EihX4``6riMjKdbOgHuwFt{EGo;7I#{|7v1{72K%vhf&&+98>;VMrWqb!5C_=7iZldY z^PlSgS;=sc!IfEUzJs zI`Q`k;pMI1kJ&D9QP2d=GiH(uB2pkr&$<)T5^IE|BT~mhgTuzvdZwk%23f;OT4+#g%(SNR@a#?E_BMST>miamIIyWF=ZE~@H6U(pUrnYLdZ z6_5vPZq`n`fz!nY!N9ADn73a!ihyQ-z|>mSoq z%fQM|xW&9s>(qTugVIx>_T*Tq+hgUH+{-gkz4^MOb5pZ{KQEqa##|<;^^V>>6>fia zQiOdF@V)8G=X8Fp7qD^bDUV<8m?QK?ezS`yzbG`*i#zN=rv0sT^27phQR=xls%_X~ zwADw*xHaQ-prCl5;ZbEOhghP_%ab2t;o8GPE<-sCRr#%<_NSbnoVEg+$j|P9JqOitrw3`n;X5T ztI}60!DfOxk+L{6Y(Yvy`fVQjJKTQx(HHOW8t?KMgqsbde!fm|w`xn}Aet0RGnK$) zYMvO2o%s@eF+29kT;pc^fTi1@6>qkS)aHdJvLVu7l#AJ2LQ)oXMBQhM7qE2|zyrI; zZF!V#p-)r8C^!F8HnZ5==(}vFxVl>j*Sj)8;oP03PohM+9IyhWQbFPX81;EmzN#Dz zFZI4jMb9JcUpgJZyUP$9PY&$uBvrzmHBMMPjSsU~lB!rhk*mhTjV$Khg}me@eLvqZ z0qk9@X5gX19sdLYWt%u6{M*l2n}9>(FIX-EY}@hLT6J_wO!iX{E@drKS%!zQclW4RB4R~ty4FC( zs^_UgyL$P)nRFjZ^JIf5%kZVO$5@HJx!=MWx5LfW*enSQc|qNM%tX;%LvaQ1jxDYR z5e_PZq&DOM0WCpnm#_l)6SqS*7Ul?62=+5S5+|K4kNw7+P2HVAoBh*QNDCb4A@zzx zU>0jP%{i_3+ln=O}mJwn)8~P;y)3CH~-yTUl@^gK*8#oQ~DkAAs@`oSZkE z-c~tlMu1+ds)^qT)L>Iy$f-^*MXJA=D-oYfxRu1YUqI~BQuj@Sq`@uWnMp<~bot-R zsSa&R+%WYO8pfPu()yPlcR%e@l#TWhmap+4DDmI{>IAuT4 zC;H03;@djZ9QT&}j0{PDhL3hKl)Mf*ME$+_WG;h_5OY;!jA-WcNjDt><>6DO6xk&D z!P?y#yVP^MLOj?rMmHfknZQWAo&KtmVWVQL*Ok$na&Ovi;T0D<%v!)@a%`cT`Co~o z$xF&Y9FUPwtHqNSS>@t?c{M$MWwroNrsJ?KQ}NT!fNQrEI4W0#pZ`i&ZU6{1!7N)`~)=ko+8YH1nE2T*ZVgK z!K`Oq{h4;=BYidBfwe@GAUQEIsuQ=bSt3foe?ONvcOv57=G!-;%2kQi3d%~UjcmNE zH1}8Oo~Yn7z0b)J$e7)|+_;=k&qF9N!s)$dC2t`U$w1_CZ>t|j*oG*N0+N4<9V}}r%eK;9{Rod znN}GjEfE=VJ?TafqZ-T*tNOqp3HHNZkTW#d3)b}p= z{8Oln%(rO2c%Pe9R-dM9HHV&mssT%Vi_WpFz66)N_JFOw;&5~RtsHCC5(nVef zSGpMD!Aq3Ocy81aK(tk}lky;$8 z^Ht`2|D{()^lcMk7z?Z=F*zu$u~B;6Vq%nuIgWR4qzjwxDr z>QZ~Q=u_`=wWVeY7QgDyi062oPOPvKQ0gOlx!F=>@xp=dZDkXjqL`>+s-GQp?3zTn z*fV1yEt|hcMOWt%?D5s_Dm0BlDQ2)2Skztm82Y};1BhM=s9=TQq5jX*Lf;K#@0|~2 zi%XtYgd2lNhF2~bXSRscqH+!<^ATHXF4;H2ITp#nN%X_*cO_rJ!}XiO`?W#MnS;u% z=+_JV=StpJ=k}R{(W6`48--F9@QcBX=+S!3rwl4yO;=7{0ye%x+E;g3_P2dTgO3-$ zvtg;AA% + +#include "v1_0/innrt_device.h" +#include "ashmem.h" +#include "include/api/model.h" + +#include "mindspore_schema/model_generated.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +class NnrtDeviceService : public INnrtDevice { +public: + NnrtDeviceService() = default; + virtual ~NnrtDeviceService(); + + int32_t GetDeviceName(std::string& name) 
override; + + int32_t GetVendorName(std::string& name) override; + + int32_t GetDeviceType(DeviceType& deviceType) override; + + int32_t GetDeviceStatus(DeviceStatus& status) override; + + int32_t GetSupportedOperation(const Model& model, std::vector& ops) override; + + int32_t IsFloat16PrecisionSupported(bool& isSupported) override; + + int32_t IsPerformanceModeSupported(bool& isSupported) override; + + int32_t IsPrioritySupported(bool& isSupported) override; + + int32_t IsDynamicInputSupported(bool& isSupported) override; + + int32_t PrepareModel(const Model& model, const ModelConfig& config, sptr& preparedModel) override; + + int32_t IsModelCacheSupported(bool& isSupported) override; + + int32_t PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, + sptr& preparedModel) override; + + int32_t AllocateBuffer(uint32_t length, SharedBuffer& buffer) override; + + int32_t ReleaseBuffer(const SharedBuffer& buffer) override; + +private: + int32_t ValidateModelConfig(const ModelConfig& config) const; + int32_t ValidateModel(const Model& model) const; + std::shared_ptr TransModelToGraph(const Model& model) const; + std::unique_ptr TransTensor(const Tensor& tensor) const; + std::unique_ptr TransNode(const Node& node) const; + std::unique_ptr TransSubGraph(const SubGraph& graph, const size_t numTensor) const; + std::shared_ptr TransModelConfig(const ModelConfig& config) const; + +private: + std::shared_ptr m_model {nullptr}; + std::unordered_map> m_ashmems; +}; +} // V1_0 +} // Nnrt +} // HDI +} // OHOS + +#endif // OHOS_HDI_NNRT_V1_0_NNRTDEVICESERVICE_H \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/include/node_functions.h b/example/drivers/nnrt/hdi_cpu_service/include/node_functions.h new file mode 100644 index 0000000..5e820b9 --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/include/node_functions.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_HDI_NNR_NODE_FUNCTIONS_H +#define OHOS_HDI_NNR_NODE_FUNCTIONS_H + +#include + +#include "hdf_base.h" +#include "utils/hdf_log.h" + +#include "node_registry.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +template +int32_t ParsePrimitive(const std::vector& primitive, T& attr, + std::function parseFunc) +{ + if (primitive.empty()) { + HDF_LOGE("Primitive data is empty."); + return HDF_FAILURE; + } + + OHOS::MessageParcel parcelData; + bool ret = parcelData.WriteBuffer(primitive.data(), primitive.size()); + if (!ret) { + HDF_LOGE("Write data to MessageParcel failed."); + return HDF_FAILURE; + } + + ret = parseFunc(parcelData, attr); + if (!ret) { + HDF_LOGE("Unmarshalling data failed."); + return HDF_FAILURE; + } + return HDF_SUCCESS; +} + +PrimUniquePtr GetAddPrimitive(const std::vector& primitive); +PrimUniquePtr GetAvgPoolPrimitive(const std::vector& primitive); +PrimUniquePtr GetConcatPrimitive(const std::vector& primitive); +PrimUniquePtr GetConv2dPrimitive(const std::vector& primitive); +PrimUniquePtr GetFullConnectionPrimitive(const std::vector& primitive); +PrimUniquePtr GetMaxPoolFusionPrimitive(const std::vector& primitive); +PrimUniquePtr GetMatMulFusionPrimitive(const std::vector& primitive); +PrimUniquePtr GetSoftmaxPrimitive(const std::vector& primitive); +PrimUniquePtr GetReshapePrimitive(const std::vector& primitive); +PrimUniquePtr GetScaleFusionPrimitive(const std::vector& primitive); +PrimUniquePtr GetActivationPrimitive(const std::vector& primitive); +PrimUniquePtr GetQuantDTypeCastPrimitive(const std::vector& primitive); +PrimUniquePtr GetMulFusionPrimitive(const std::vector& primitive); +} // namespace V1_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS +#endif // OHOS_HDI_NNR_NODE_FUNCTIONS_H \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/include/node_registry.h b/example/drivers/nnrt/hdi_cpu_service/include/node_registry.h new file mode 100644 index 0000000..cc2083c --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/include/node_registry.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_HDI_NNR_NODE_REGISTRY_H +#define OHOS_HDI_NNR_NODE_REGISTRY_H + +#include +#include +#include + +#include "v1_0/nnrt_types.h" +#include "mindspore_schema/model_generated.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +using PrimUniquePtr = std::unique_ptr; +class NodeRegistry { +public: + struct Registrar { + Registrar() = delete; + Registrar(NodeType type, std::function&)> nodeFunc); + }; + +public: + static NodeRegistry& GetSingleton(); + std::function&)> GetNodeFunc(NodeType type) const; + bool IsNodeTypeExist(NodeType type) const; + +private: + NodeRegistry() {}; + NodeRegistry(const NodeRegistry&) = delete; + NodeRegistry& operator=(const NodeRegistry&) = delete; + +private: + std::unordered_map&)>> m_nodeRegs; +}; + +#define REGISTER_NODE(nodeName, nodeType, funcPtr) static NodeRegistry::Registrar g_##nodeName(nodeType, funcPtr) +} // namespace V1_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS +#endif // OHOS_HDI_NNR_NODE_REGISTRY_H \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/include/prepared_model_service.h b/example/drivers/nnrt/hdi_cpu_service/include/prepared_model_service.h new file mode 100644 index 0000000..f4cb99e --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/include/prepared_model_service.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_HDI_NNR_V1_0_PREPAREDMODELSERVICE_H +#define OHOS_HDI_NNR_V1_0_PREPAREDMODELSERVICE_H + +#include "v1_0/iprepared_model.h" +#include "include/api/data_type.h" +#include "include/api/context.h" +#include "include/api/types.h" +#include "include/api/model.h" +#include "mindspore_schema/model_generated.h" +#include "ashmem.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +constexpr int DYNAMIC_SHAPE_FLAG = -1; +class PreparedModelService : public IPreparedModel { +public: + PreparedModelService() = default; + + virtual ~PreparedModelService(); + + explicit PreparedModelService(std::shared_ptr context); + + int32_t Compile(std::shared_ptr graph); + + int32_t Compile(const void* modelBuffer, size_t length); + + int32_t ExportModelCache(std::vector& modelCache) override; + + int32_t Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) override; + +private: + int32_t SetInputs(const std::vector& inputs); + int32_t SetOutputs(const std::vector& outputs); + int32_t GetMSInputsAndOutputs(); + int32_t CompareTensor(const IOTensor& tensor, const mindspore::MSTensor& msTensor); + sptr ParseBuffer(const SharedBuffer& buffer); + int32_t UpdateOutput(const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough); + void ResetInputAndOutput(); + +private: + std::shared_ptr m_graph {nullptr}; + std::shared_ptr m_context {nullptr}; + flatbuffers::FlatBufferBuilder m_builder; + std::shared_ptr m_model {nullptr}; + sptr m_cacheBuffer {nullptr}; + std::vector> m_inputAshmems; + std::vector m_inputs; + std::vector> m_outputAshmems; + std::vector m_outputs; + std::vector> m_inputDims; + bool m_isDynamicShape {false}; +}; +} // V1_0 +} // Nnrt +} // HDI +} // OHOS + +#endif // OHOS_HDI_NNR_V1_0_PREPAREDMODELSERVICE_H \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/include/shared_buffer_parser.h b/example/drivers/nnrt/hdi_cpu_service/include/shared_buffer_parser.h new file mode 100644 index 0000000..b3479ca --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/include/shared_buffer_parser.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_HDI_NNR_V1_0_SHARED_BUFFER_PARSER_H +#define OHOS_HDI_NNR_V1_0_SHARED_BUFFER_PARSER_H + +#include "ashmem.h" +#include "v1_0/nnrt_types.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +namespace { +const int INVALID_FD = -1; +} + +class SharedBufferParser { +public: + SharedBufferParser() {}; + ~SharedBufferParser(); + + int32_t Init(const SharedBuffer& buffer); + int32_t Init(const std::string& name, int32_t size); + void* GetBufferPtr(); + SharedBuffer GetBuffer(); + +private: + SharedBuffer m_buffer; + sptr m_ashptr {nullptr}; + void* m_bufferAddr {nullptr}; +}; +} // V1_0 +} // Nnrt +} // HDI +} // OHOS +#endif // OHOS_HDI_NNR_V1_0_SHARED_BUFFER_PARSER_H \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/include/validation.h b/example/drivers/nnrt/hdi_cpu_service/include/validation.h new file mode 100644 index 0000000..42cd84b --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/include/validation.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef OHOS_HDI_NNRT_VALIDATION_H +#define OHOS_HDI_NNRT_VALIDATION_H + +#include "v1_0/nnrt_types.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +int32_t ValidatePerformanceMode(PerformanceMode mode); +int32_t ValidatePriority(Priority priority); +int32_t ValidateDataType(DataType dataType); +int32_t ValidateFormat(Format format); +} // namespace V1_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS +#endif // OHOS_HDI_NNRT_VALIDATION_H \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_driver.cpp b/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_driver.cpp new file mode 100644 index 0000000..dfa9d23 --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_driver.cpp @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include "v1_0/nnrt_device_stub.h" + +using namespace OHOS::HDI::Nnrt::V1_0; + +struct HdfNnrtDeviceHost { + struct IDeviceIoService ioService; + OHOS::sptr stub; +}; + +static int32_t NnrtDeviceDriverDispatch(struct HdfDeviceIoClient *client, int cmdId, struct HdfSBuf *data, + struct HdfSBuf *reply) +{ + auto *hdfNnrtDeviceHost = CONTAINER_OF(client->device->service, struct HdfNnrtDeviceHost, ioService); + + OHOS::MessageParcel *dataParcel = nullptr; + OHOS::MessageParcel *replyParcel = nullptr; + OHOS::MessageOption option; + + if (SbufToParcel(data, &dataParcel) != HDF_SUCCESS) { + HDF_LOGE("%{public}s:invalid data sbuf object to dispatch", __func__); + return HDF_ERR_INVALID_PARAM; + } + if (SbufToParcel(reply, &replyParcel) != HDF_SUCCESS) { + HDF_LOGE("%{public}s:invalid reply sbuf object to dispatch", __func__); + return HDF_ERR_INVALID_PARAM; + } + + return hdfNnrtDeviceHost->stub->SendRequest(cmdId, *dataParcel, *replyParcel, option); +} + +static int HdfNnrtDeviceDriverInit(struct HdfDeviceObject *deviceObject) +{ + HDF_LOGI("HdfNnrtDeviceDriverInit enter"); + return HDF_SUCCESS; +} + +static int HdfNnrtDeviceDriverBind(struct HdfDeviceObject *deviceObject) +{ + HDF_LOGI("HdfNnrtDeviceDriverBind enter"); + + auto *hdfNnrtDeviceHost = new (std::nothrow) HdfNnrtDeviceHost; + if (hdfNnrtDeviceHost == nullptr) { + HDF_LOGE("%{public}s: failed to create create HdfNnrtDeviceHost object", __func__); + return HDF_FAILURE; + } + + hdfNnrtDeviceHost->ioService.Dispatch = NnrtDeviceDriverDispatch; + hdfNnrtDeviceHost->ioService.Open = NULL; + hdfNnrtDeviceHost->ioService.Release = NULL; + + auto serviceImpl = INnrtDevice::Get(true); + if (serviceImpl == nullptr) { + HDF_LOGE("%{public}s: failed to get of implement service", __func__); + delete hdfNnrtDeviceHost; + return HDF_FAILURE; + } + + hdfNnrtDeviceHost->stub = OHOS::HDI::ObjectCollector::GetInstance().GetOrNewObject(serviceImpl, + INnrtDevice::GetDescriptor()); + if (hdfNnrtDeviceHost->stub == nullptr) { + HDF_LOGE("%{public}s: failed to get stub object", __func__); + delete hdfNnrtDeviceHost; + return HDF_FAILURE; + } + + deviceObject->service = &hdfNnrtDeviceHost->ioService; + return HDF_SUCCESS; +} + +static void HdfNnrtDeviceDriverRelease(struct HdfDeviceObject *deviceObject) +{ + HDF_LOGI("HdfNnrtDeviceDriverRelease enter"); + if (deviceObject->service == nullptr) { + HDF_LOGE("HdfNnrtDeviceDriverRelease not initted"); + return; + } + + auto *hdfNnrtDeviceHost = CONTAINER_OF(deviceObject->service, struct HdfNnrtDeviceHost, ioService); + delete hdfNnrtDeviceHost; +} + +struct HdfDriverEntry g_nnrtdeviceDriverEntry = { + .moduleVersion = 1, + .moduleName = "nnrt", + .Bind = HdfNnrtDeviceDriverBind, + .Init = HdfNnrtDeviceDriverInit, + .Release = HdfNnrtDeviceDriverRelease, +}; + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ +HDF_INIT(g_nnrtdeviceDriverEntry); +#ifdef __cplusplus +} +#endif /* __cplusplus */ \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_service.cpp b/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_service.cpp new file mode 100644 index 0000000..e3b4375 --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_service.cpp @@ -0,0 +1,436 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnrt_device_service.h" + +#include +#include "utils/hdf_log.h" +#include "ashmem.h" +#include "securec.h" + +#include "node_registry.h" +#include "prepared_model_service.h" +#include "shared_buffer_parser.h" +#include "validation.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +extern "C" INnrtDevice *NnrtDeviceImplGetInstance(void) +{ + return new (std::nothrow) NnrtDeviceService(); +} + +NnrtDeviceService::~NnrtDeviceService() +{ + for (auto ash : m_ashmems) { + ash.second->UnmapAshmem(); + ash.second->CloseAshmem(); + } +} + +int32_t NnrtDeviceService::GetDeviceName(std::string& name) +{ + name = "RK3568-CPU"; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::GetVendorName(std::string& name) +{ + name = "Rockchip"; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::GetDeviceType(DeviceType& deviceType) +{ + deviceType = DeviceType::CPU; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::GetDeviceStatus(DeviceStatus& status) +{ + status = DeviceStatus::AVAILABLE; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::GetSupportedOperation(const Model& model, std::vector& ops) +{ + size_t nodeSize = model.nodes.size(); + auto nodes = model.nodes; + ops.resize(nodeSize, false); + auto& regInstance = NodeRegistry::GetSingleton(); + for (size_t i = 0; i < nodeSize; i++) { + ops[i] = regInstance.IsNodeTypeExist(nodes[i].nodeType); + } + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::IsFloat16PrecisionSupported(bool& isSupported) +{ + isSupported = true; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::IsPerformanceModeSupported(bool& isSupported) +{ + isSupported = true; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::IsPrioritySupported(bool& isSupported) +{ + isSupported = false; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::IsDynamicInputSupported(bool& isSupported) +{ + isSupported = true; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::PrepareModel(const Model& model, const ModelConfig& config, + sptr& preparedModel) +{ + auto ret = ValidateModel(model); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Model is invalid."); + return ret; + } + + auto graph = TransModelToGraph(model); + if (graph == nullptr) { + HDF_LOGE("Transfrom model to graph failed."); + return HDF_ERR_INVALID_PARAM; + } + + ret = ValidateModelConfig(config); + if (ret != HDF_SUCCESS) { + HDF_LOGE("ModelConfig is invalid."); + return ret; + } + + auto context = TransModelConfig(config); + sptr service = new (std::nothrow) PreparedModelService(context); + if (service == nullptr) { + HDF_LOGE("Create new PreparedModelService instance failed."); + return HDF_ERR_MALLOC_FAIL; + } + + ret = service->Compile(graph); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Prepared model failed."); + return ret; + } + + preparedModel = service; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::IsModelCacheSupported(bool& isSupported) +{ + isSupported = true; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, sptr& preparedModel) +{ + 
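+    // The single modelCache entry is the flatbuffer blob previously exported by
+    // ExportModelCache(); it is mapped from shared memory here and handed straight to
+    // Compile(buffer, size), skipping the Model-to-MetaGraph translation done in PrepareModel().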
HDF_LOGD("Using cache to prepare model."); + + // modelCache must be 1, because PreparedModel only export one cache file. + if (modelCache.size() != 1) { + HDF_LOGE("The size of modelCache vector is not valid, it should be one elememt in that vector."); + return HDF_ERR_INVALID_PARAM; + } + + SharedBufferParser parser; + auto ret = parser.Init(modelCache[0]); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse modle buffer failed."); + return HDF_ERR_INVALID_PARAM; + } + + void* modelBuffer = parser.GetBufferPtr(); + auto context = TransModelConfig(config); + sptr service = new (std::nothrow) PreparedModelService(context); + if (service == nullptr) { + HDF_LOGE("Create new instance PreparedModelService failed."); + return HDF_ERR_MALLOC_FAIL; + } + + ret = service->Compile(modelBuffer, modelCache[0].dataSize); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Prepared model failed."); + return ret; + } + + preparedModel = service; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::AllocateBuffer(uint32_t length, SharedBuffer& buffer) +{ + sptr ashptr = Ashmem::CreateAshmem("allocateBuffer", length); + if (ashptr == nullptr) { + HDF_LOGE("Create shared memory failed."); + return HDF_FAILURE; + } + + if (!ashptr->MapReadAndWriteAshmem()) { + HDF_LOGE("Map allocate buffer failed."); + return HDF_FAILURE; + } + + buffer.fd = ashptr->GetAshmemFd(); + buffer.bufferSize = ashptr->GetAshmemSize(); + buffer.offset = 0; + buffer.dataSize = length; + + m_ashmems[buffer.fd] = ashptr; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::ReleaseBuffer(const SharedBuffer& buffer) +{ + // parser will close current fd. + SharedBufferParser parser; + auto ret = parser.Init(buffer); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse buffer failed."); + return HDF_ERR_INVALID_PARAM; + } + + for (auto& ash : m_ashmems) { + ash.second->UnmapAshmem(); + ash.second->CloseAshmem(); + } + m_ashmems.clear(); + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::ValidateModelConfig(const ModelConfig& config) const +{ + if (!ValidatePerformanceMode(config.mode)) { + HDF_LOGE("PerformanceMode is invalid. mode=%d", config.mode); + return HDF_ERR_INVALID_PARAM; + } + + if (!ValidatePriority(config.priority)) { + HDF_LOGE("Priority is invalid. 
priority=%d", config.priority); + return HDF_ERR_INVALID_PARAM; + } + + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::ValidateModel(const Model& model) const +{ + if (model.allTensors.empty()) { + HDF_LOGE("Model has no tensors."); + return HDF_ERR_INVALID_PARAM; + } + + if (model.subGraph.empty()) { + HDF_LOGE("Model has no subGraphs."); + return HDF_ERR_INVALID_PARAM; + } + + if (model.nodes.empty()) { + HDF_LOGE("Model has no nodes."); + return HDF_ERR_INVALID_PARAM; + } + + if (model.inputIndex.empty()) { + HDF_LOGE("Model has no input."); + return HDF_ERR_INVALID_PARAM; + } + + if (model.outputIndex.empty()) { + HDF_LOGE("Model has no output."); + return HDF_ERR_INVALID_PARAM; + } + + size_t tensorSize = model.allTensors.size(); + for (auto index : model.inputIndex) { + if (index > tensorSize) { + HDF_LOGE("Input index is invalid, index=%u", index); + return HDF_ERR_INVALID_PARAM; + } + } + + for (auto index : model.outputIndex) { + if (index > tensorSize) { + HDF_LOGE("Output index is invalid, index=%u", index); + return HDF_ERR_INVALID_PARAM; + } + } + + return HDF_SUCCESS; +} + +std::shared_ptr NnrtDeviceService::TransModelToGraph(const Model& model) const +{ + auto metaGraph = std::make_shared(); + metaGraph->name = model.name; + metaGraph->version = mindspore::Version(); + + std::unique_ptr transTensor{nullptr}; + for (auto tensor : model.allTensors) { + transTensor = TransTensor(tensor); + if (transTensor == nullptr) { + HDF_LOGE("Transform tensor failed."); + return nullptr; + } + metaGraph->allTensors.emplace_back(std::move(transTensor)); + } + metaGraph->inputIndex = model.inputIndex; + metaGraph->outputIndex = model.outputIndex; + + // Transform node + std::unique_ptr transNode {nullptr}; + for (auto& node : model.nodes) { + transNode = TransNode(node); + if (transNode == nullptr) { + HDF_LOGE("Transform node failed, node name=%{public}s", node.name.c_str()); + return nullptr; + } + metaGraph->nodes.emplace_back(std::move(transNode)); + } + + // Transform subgraph + const size_t numTensor = model.allTensors.size(); + for (auto graph : model.subGraph) { + metaGraph->subGraph.emplace_back(TransSubGraph(graph, numTensor)); + } + return metaGraph; +} + +std::unique_ptr NnrtDeviceService::TransTensor(const Tensor& tensor) const +{ + if (!ValidateDataType(tensor.dataType)) { + HDF_LOGE("DataType of tensor is invalid. dataType=%d", tensor.dataType); + return nullptr; + } + + if (!ValidateFormat(tensor.format)) { + HDF_LOGE("Format of tensor is invalid. 
format=%d", tensor.format); + return nullptr; + } + + auto schemaTensor = std::make_unique(); + schemaTensor->name = tensor.name; + schemaTensor->dataType = static_cast(tensor.dataType); + schemaTensor->format = static_cast(tensor.format); + schemaTensor->dims = tensor.dims; + for (auto param : tensor.quantParams) { + auto quantParam = std::make_unique(); + quantParam->scale = param.scale; + quantParam->zeroPoint = param.zeroPoint; + quantParam->numBits = param.numBits; + quantParam->inited = true; + schemaTensor->quantParams.emplace_back(std::move(quantParam)); + } + + if (tensor.data.fd != INVALID_FD) { + SharedBufferParser parser; + auto ret = parser.Init(tensor.data); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse tensor data failed."); + return nullptr; + } + + auto data = parser.GetBufferPtr(); + schemaTensor->data.resize(tensor.data.dataSize); + auto memRet = memcpy_s(const_cast(schemaTensor->data.data()), + tensor.data.dataSize, data, tensor.data.dataSize); + if (memRet != EOK) { + HDF_LOGW("Copy tensor data failed."); + return nullptr; + } + } + return schemaTensor; +} + +std::unique_ptr NnrtDeviceService::TransNode(const Node& node) const +{ + auto cnode = std::make_unique(); + cnode->name = node.name; + cnode->inputIndex = node.inputIndex; + cnode->outputIndex = node.outputIndex; + cnode->quantType = static_cast(node.quantType); + + auto& regInstance = NodeRegistry::GetSingleton(); + auto parseFunc = regInstance.GetNodeFunc(node.nodeType); + auto primitive = parseFunc(node.nodeAttr); + if (primitive == nullptr) { + HDF_LOGE("Parse primitve data failed. node name=%{public}s", node.name.c_str()); + return nullptr; + } + + cnode->primitive = std::move(primitive); + return cnode; +} + +std::unique_ptr NnrtDeviceService::TransSubGraph(const SubGraph& graph, + const size_t numTensor) const +{ + auto subGraph = std::make_unique(); + subGraph->name = graph.name; + subGraph->inputIndices = graph.inputIndices; + subGraph->outputIndices = graph.outputIndices; + subGraph->nodeIndices = graph.nodeIndices; + subGraph->tensorIndices.reserve(numTensor); + for (auto i = 0; i < numTensor; i++) { + subGraph->tensorIndices.emplace_back(static_cast(i)); + } + return subGraph; +} + +std::shared_ptr NnrtDeviceService::TransModelConfig(const ModelConfig& config) const +{ + auto context = std::make_shared(); + const int cpuThreadNum = 2; + const int cpuNoAffinities = 0; + const int cpuBigCore = 1; + const int cpuLittleCore = 2; + context->SetThreadNum(cpuThreadNum); + + int mode = cpuNoAffinities; + switch (config.mode) { + case PerformanceMode::PERFORMANCE_LOW: + case PerformanceMode::PERFORMANCE_MEDIUM: + mode = cpuLittleCore; + break; + case PerformanceMode::PERFORMANCE_HIGH: + case PerformanceMode::PERFORMANCE_EXTREME: + mode = cpuBigCore; + break; + default: + mode = cpuNoAffinities; + } + context->SetThreadAffinity(mode); + + auto cpuInfo = std::make_shared(); + cpuInfo->SetEnableFP16(config.enableFloat16); + auto& deviceInfos = context->MutableDeviceInfo(); + deviceInfos.emplace_back(cpuInfo); + return context; +} +} // V1_0 +} // Nnrt +} // HDI +} // OHOS diff --git a/example/drivers/nnrt/hdi_cpu_service/src/node_functions.cpp b/example/drivers/nnrt/hdi_cpu_service/src/node_functions.cpp new file mode 100644 index 0000000..51ca0ee --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/src/node_functions.cpp @@ -0,0 +1,373 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "node_functions.h" + +#include "node_registry.h" +#include +#include + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +PrimUniquePtr GetAddPrimitive(const std::vector& primitive) +{ + AddFusion addAttr; + auto ret = ParsePrimitive(primitive, addAttr, AddFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of AddFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_AddFusion; + auto attr = new (std::nothrow) mindspore::schema::AddFusionT; + if (attr == nullptr) { + HDF_LOGE("Create AddFusion primitive failed."); + return nullptr; + } + attr->activation_type = static_cast(addAttr.activationType); + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetAvgPoolPrimitive(const std::vector& primitive) +{ + AvgPoolFusion avgPoolAttr; + auto ret = ParsePrimitive(primitive, avgPoolAttr, AvgPoolFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of AvgPoolFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_AvgPoolFusion; + + auto attr = new (std::nothrow) mindspore::schema::AvgPoolFusionT; + if (attr == nullptr) { + HDF_LOGE("Create AvgPoolFusion primitive failed."); + return nullptr; + } + attr->kernel_size = avgPoolAttr.kernelSize; + attr->strides = avgPoolAttr.strides; + attr->pad = avgPoolAttr.pad; + attr->pad_mode = static_cast(avgPoolAttr.padMode); + attr->round_mode = static_cast(avgPoolAttr.roundMode); + attr->format = static_cast(avgPoolAttr.format); + attr->global = avgPoolAttr.global; + attr->activation_type = static_cast(avgPoolAttr.activationType); + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetConcatPrimitive(const std::vector& primitive) +{ + Concat concatAttr; + auto ret = ParsePrimitive(primitive, concatAttr, ConcatBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Concat operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Concat; + + auto attr = new (std::nothrow) mindspore::schema::ConcatT; + if (attr == nullptr) { + HDF_LOGE("Create concat primitive failed."); + return nullptr; + } + attr->axis = concatAttr.axis; + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetConv2dPrimitive(const std::vector& primitive) +{ + Conv2DFusion conv2dAttr; + auto ret = ParsePrimitive(primitive, conv2dAttr, Conv2DFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Conv2DFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Conv2DFusion; + + auto attr = new (std::nothrow) mindspore::schema::Conv2DFusionT; + if (attr == nullptr) { + HDF_LOGE("Create Conv2DFusion primitive failed."); + return 
nullptr; + } + + attr->kernel_size = conv2dAttr.kernelSize; + attr->stride = conv2dAttr.stride; + attr->dilation = conv2dAttr.dilation; + attr->pad_mode = static_cast(conv2dAttr.padMode); + attr->pad_list = conv2dAttr.padList; + attr->group = conv2dAttr.group; + attr->in_channel = conv2dAttr.inChannel; + attr->out_channel = conv2dAttr.outChannel; + attr->activation_type = static_cast(conv2dAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetFullConnectionPrimitive(const std::vector& primitive) +{ + FullConnection fullConnAttr; + auto ret = ParsePrimitive(primitive, fullConnAttr, FullConnectionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of FullConnection operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_FullConnection; + + auto attr = new (std::nothrow) mindspore::schema::FullConnectionT; + if (attr == nullptr) { + HDF_LOGE("Create FullConnection primitive failed."); + return nullptr; + } + + attr->has_bias = fullConnAttr.hasBias; + attr->use_axis = fullConnAttr.useAxis; + attr->axis = fullConnAttr.axis; + attr->activation_type = static_cast(fullConnAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetMaxPoolFusionPrimitive(const std::vector& primitive) +{ + MaxPoolFusion maxPoolAttr; + auto ret = ParsePrimitive(primitive, maxPoolAttr, MaxPoolFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of MaxPoolFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_MaxPoolFusion; + + auto attr = new (std::nothrow) mindspore::schema::MaxPoolFusionT; + if (attr == nullptr) { + HDF_LOGE("Create MaxPoolFusion primitive failed."); + return nullptr; + } + + attr->kernel_size = maxPoolAttr.kernelSize; + attr->strides = maxPoolAttr.strides; + attr->pad = maxPoolAttr.pad; + attr->pad_mode = static_cast(maxPoolAttr.padMode); + attr->format = static_cast(maxPoolAttr.format); + attr->global = maxPoolAttr.global; + attr->activation_type = static_cast(maxPoolAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetMatMulFusionPrimitive(const std::vector& primitive) +{ + MatMulFusion matmulAttr; + auto ret = ParsePrimitive(primitive, matmulAttr, MatMulFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of MatMulFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_MatMulFusion; + + auto attr = new (std::nothrow) mindspore::schema::MatMulFusionT; + if (attr == nullptr) { + HDF_LOGE("Create MatMulFusion primitive failed."); + return nullptr; + } + + attr->transpose_a = matmulAttr.transposeA; + attr->transpose_b = matmulAttr.transposeB; + attr->activation_type = static_cast(matmulAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetSoftmaxPrimitive(const std::vector& primitive) +{ + Softmax softmaxAttr; + auto ret = ParsePrimitive(primitive, softmaxAttr, SoftmaxBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Softmax operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Softmax; + + auto attr = new (std::nothrow) mindspore::schema::SoftmaxT; + if (attr == nullptr) { + HDF_LOGE("Create Softmax primitive 
failed."); + return nullptr; + } + + attr->axis = softmaxAttr.axis; + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetReshapePrimitive(const std::vector& primitive) +{ + Reshape reshapeAttr; + auto ret = ParsePrimitive(primitive, reshapeAttr, ReshapeBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Reshape operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Reshape; + + auto attr = new (std::nothrow) mindspore::schema::ReshapeT; + if (attr == nullptr) { + HDF_LOGE("Create Reshape primitive failed."); + return nullptr; + } + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetScaleFusionPrimitive(const std::vector& primitive) +{ + ScaleFusion scaleAttr; + auto ret = ParsePrimitive(primitive, scaleAttr, ScaleFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of ScaleFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_ScaleFusion; + + auto attr = new (std::nothrow) mindspore::schema::ScaleFusionT; + if (attr == nullptr) { + HDF_LOGE("Create ScaleFusion primitive failed."); + return nullptr; + } + + attr->axis = scaleAttr.axis; + attr->activation_type = static_cast(scaleAttr.activationType); + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetActivationPrimitive(const std::vector& primitive) +{ + Activation actAttr; + auto ret = ParsePrimitive(primitive, actAttr, ActivationBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Activation operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Activation; + + auto attr = new (std::nothrow) mindspore::schema::ActivationT; + if (attr == nullptr) { + HDF_LOGE("Create Activation primitive failed."); + return nullptr; + } + + attr->alpha = actAttr.alpha; + attr->min_val = actAttr.minVal; + attr->max_val = actAttr.maxVal; + attr->approximate = actAttr.approximate; + attr->activation_type = static_cast(actAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetQuantDTypeCastPrimitive(const std::vector& primitive) +{ + QuantDTypeCast quantAttr; + auto ret = ParsePrimitive(primitive, quantAttr, QuantDTypeCastBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of QuantDTypeCast operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_QuantDTypeCast; + + auto attr = new (std::nothrow) mindspore::schema::QuantDTypeCastT; + if (attr == nullptr) { + HDF_LOGE("Create QuantDTypeCast primitive failed."); + return nullptr; + } + + attr->src_t = quantAttr.srcT; + attr->dst_t = quantAttr.dstT; + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetMulFusionPrimitive(const std::vector& primitive) +{ + MulFusion mulAttr; + auto ret = ParsePrimitive(primitive, mulAttr, MulFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of MulFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_MulFusion; + + auto attr = new (std::nothrow) mindspore::schema::MulFusionT; + if (attr == nullptr) { + HDF_LOGE("Create MulFusion primitive failed."); + return nullptr; + } + + attr->activation_type = static_cast(mulAttr.activationType); + 
prim->value.value = attr; + return prim; +} + +REGISTER_NODE(Activation, NodeType::NODE_TYPE_ACTIVATION, GetActivationPrimitive); +REGISTER_NODE(AddFusion, NodeType::NODE_TYPE_ADD_FUSION, GetAddPrimitive); +REGISTER_NODE(AvgPoolFusion, NodeType::NODE_TYPE_AVGPOOL_FUSION, GetAvgPoolPrimitive); +REGISTER_NODE(Concat, NodeType::NODE_TYPE_CONCAT, GetConcatPrimitive); +REGISTER_NODE(Conv2DFusion, NodeType::NODE_TYPE_CONV2D_FUSION, GetConv2dPrimitive); +REGISTER_NODE(FullConnection, NodeType::NODE_TYPE_FULL_CONNECTION, GetFullConnectionPrimitive); +REGISTER_NODE(MaxPoolFusion, NodeType::NODE_TYPE_MAX_POOL_FUSION, GetMaxPoolFusionPrimitive); +REGISTER_NODE(MatMulFusion, NodeType::NODE_TYPE_MATMUL_FUSION, GetMatMulFusionPrimitive); +REGISTER_NODE(Reshape, NodeType::NODE_TYPE_RESHAPE, GetReshapePrimitive); +REGISTER_NODE(Softmax, NodeType::NODE_TYPE_SOFTMAX, GetSoftmaxPrimitive); +REGISTER_NODE(ScaleFusion, NodeType::NODE_TYPE_SCALE_FUSION, GetScaleFusionPrimitive); +REGISTER_NODE(QuantDTypeCast, NodeType::NODE_TYPE_QUANT_DTYPE_CAST, GetQuantDTypeCastPrimitive); +REGISTER_NODE(MulFusion, NodeType::NODE_TYPE_MUL_FUSION, GetMulFusionPrimitive); +} // namespace V1_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/src/node_registry.cpp b/example/drivers/nnrt/hdi_cpu_service/src/node_registry.cpp new file mode 100644 index 0000000..7b6a6aa --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/src/node_registry.cpp @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "node_registry.h" + +#include "utils/hdf_log.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +NodeRegistry& NodeRegistry::GetSingleton() +{ + static NodeRegistry registry; + return registry; +} + +NodeRegistry::Registrar::Registrar(NodeType type, std::function&)> nodeFunc) +{ + auto& registry = NodeRegistry::GetSingleton(); + if (registry.m_nodeRegs.find(type) != registry.m_nodeRegs.end()) { + HDF_LOGW("Node has been registered. nodeType=%d", type); + } else { + registry.m_nodeRegs[type] = nodeFunc; + } +} + +std::function&)> NodeRegistry::GetNodeFunc(NodeType type) const +{ + if (m_nodeRegs.find(type) == m_nodeRegs.end()) { + HDF_LOGW("Node type is not found. nodeType=%d", type); + return nullptr; + } + + return m_nodeRegs.at(type); +} + +bool NodeRegistry::IsNodeTypeExist(NodeType type) const +{ + if (m_nodeRegs.find(type) == m_nodeRegs.end()) { + return false; + } + return true; +} +} // namespace V1_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/src/prepared_model_service.cpp b/example/drivers/nnrt/hdi_cpu_service/src/prepared_model_service.cpp new file mode 100644 index 0000000..b5452f8 --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/src/prepared_model_service.cpp @@ -0,0 +1,412 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "prepared_model_service.h" + +#include +#include "securec.h" +#include "utils/hdf_log.h" + +#include "shared_buffer_parser.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +PreparedModelService::PreparedModelService(std::shared_ptr context) + : m_context(context) {} + +PreparedModelService::~PreparedModelService() +{ + if (m_cacheBuffer != nullptr) { + m_cacheBuffer->CloseAshmem(); + } + + for (auto& inputAsh : m_inputAshmems) { + inputAsh->UnmapAshmem(); + inputAsh->CloseAshmem(); + } + + for (auto& outputAsh : m_outputAshmems) { + outputAsh->UnmapAshmem(); + outputAsh->CloseAshmem(); + } +} + +int32_t PreparedModelService::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + HDF_LOGE("The parameters of ExportModelCache should be an empty vector."); + return HDF_ERR_INVALID_PARAM; + } + + if (m_cacheBuffer != nullptr) { + auto fd = m_cacheBuffer->GetAshmemFd(); + auto size = m_cacheBuffer->GetAshmemSize(); + + // SharedBuffer: fd, bufferSize, offset, dataSize + modelCache.emplace_back(SharedBuffer{fd, size, 0, size}); + return HDF_SUCCESS; + } + + auto size = m_builder.GetSize(); + auto buffer = m_builder.GetBufferPointer(); + const char* name = m_graph != nullptr ? 
m_graph->name.c_str() : "CacheModel"; + sptr cache = Ashmem::CreateAshmem(name, size); + if (cache == nullptr) { + HDF_LOGE("Create shared memory failed."); + return HDF_ERR_MALLOC_FAIL; + } + + bool ret = cache->MapReadAndWriteAshmem(); + if (!ret) { + HDF_LOGE("Map fd to write cache failed."); + return HDF_FAILURE; + } + + ret = cache->WriteToAshmem(buffer, size, 0); + cache->UnmapAshmem(); + if (!ret) { + HDF_LOGE("Write cache failed."); + return HDF_FAILURE; + } + + m_cacheBuffer = cache; + + // SharedBuffer: fd, bufferSize, offset, dataSize + modelCache.emplace_back(SharedBuffer {cache->GetAshmemFd(), cache->GetAshmemSize(), 0, cache->GetAshmemSize()}); + return HDF_SUCCESS; +} + +int32_t PreparedModelService::Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) +{ + auto ret = SetInputs(inputs); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Inputs tensor is invalid."); + return ret; + } + + if (!m_isDynamicShape) { + ret = SetOutputs(outputs); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Output tensor is invalid."); + ResetInputAndOutput(); + return ret; + } + } + + auto msRet = m_model->Predict(m_inputs, &m_outputs); + if (msRet != mindspore::kSuccess) { + HDF_LOGE("Run model failed."); + ResetInputAndOutput(); + return HDF_FAILURE; + } + + ret = UpdateOutput(outputs, outputsDims, isOutputBufferEnough); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Update output dimension or data failed."); + ResetInputAndOutput(); + return ret; + } + + ResetInputAndOutput(); + + return HDF_SUCCESS; +} + +int32_t PreparedModelService::UpdateOutput(const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) +{ + bool isEnough {true}; + size_t outputSize = m_outputs.size(); + isOutputBufferEnough.resize(outputSize, true); + for (size_t i = 0; i < outputSize; i++) { + auto& msOutput = m_outputs[i]; + auto& output = outputs[i]; + + auto msShape = msOutput.Shape(); + outputsDims.emplace_back(msShape.begin(), msShape.end()); + + auto dataSize = msOutput.DataSize(); + if (dataSize > output.data.bufferSize) { + HDF_LOGE("Output buffer is not enough. 
actual size %{public}zu, buffer size %{public}u", + dataSize, output.data.bufferSize); + isOutputBufferEnough[i] = false; + isEnough= false; + } + + if (isEnough && m_isDynamicShape) { + auto msData = msOutput.MutableData(); + SharedBufferParser parser; + auto ret = parser.Init(output.data); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse %zu th output data failed.", i); + return HDF_ERR_INVALID_PARAM; + } + + auto data = parser.GetBufferPtr(); + auto memRet = memcpy_s(data, dataSize, msData, dataSize); + if (memRet != EOK) { + HDF_LOGE("Copy output memory failed."); + return HDF_FAILURE; + } + } + } + + return HDF_SUCCESS; +} + +void PreparedModelService::ResetInputAndOutput() +{ + for (auto& msInput : m_inputs) { + msInput.SetData(nullptr); + } + + if (!m_isDynamicShape) { + for (auto& msOutput : m_outputs) { + msOutput.SetData(nullptr); + } + } +} + +int32_t PreparedModelService::Compile(std::shared_ptr graph) +{ + if (graph == nullptr) { + HDF_LOGE("Graph cannot be nullptr"); + return HDF_ERR_INVALID_PARAM; + } + for (auto i : graph->inputIndex) { + auto inputShape = graph->allTensors[i]->dims; + auto iter = std::find(inputShape.begin(), inputShape.end(), DYNAMIC_SHAPE_FLAG); + if (iter != inputShape.end()) { + m_isDynamicShape = true; + break; + } + } + auto offset = mindspore::schema::MetaGraph::Pack(m_builder, graph.get()); + m_builder.Finish(offset); + mindspore::schema::FinishMetaGraphBuffer(m_builder, offset); + auto modelSize = m_builder.GetSize(); + uint8_t* modelBuffer = m_builder.GetBufferPointer(); + if (modelBuffer == nullptr) { + HDF_LOGE("Model is invalid."); + return HDF_FAILURE; + } + + m_model = std::make_shared(); + mindspore::Status msRet = m_model->Build(modelBuffer, modelSize, mindspore::kMindIR, m_context); + if (msRet != mindspore::kSuccess) { + HDF_LOGE("Prepare model failed, please make sure model is validate."); + return HDF_FAILURE; + } + + auto ret = GetMSInputsAndOutputs(); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Model without inputs or outputs is invalid."); + return ret; + } + return HDF_SUCCESS; +} + +int32_t PreparedModelService::Compile(const void* modelBuffer, size_t length) +{ + if (modelBuffer == nullptr || length == 0) { + HDF_LOGE("ModelBuffer cannot be nullptr and length cannot be zero."); + return HDF_ERR_INVALID_PARAM; + } + + m_model = std::make_shared(); + mindspore::Status msRet = m_model->Build(modelBuffer, length, mindspore::kMindIR, m_context); + if (msRet != mindspore::kSuccess) { + HDF_LOGE("Prepare model from cache failed, please make sure model cache is valid."); + return HDF_FAILURE; + } + + auto ret = GetMSInputsAndOutputs(); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Model without inputs or outputs is invalid."); + return ret; + } + + for (auto input : m_inputs) { + auto shapes = input.Shape(); + if (std::find(shapes.begin(), shapes.end(), DYNAMIC_SHAPE_FLAG) != shapes.end()) { + m_isDynamicShape = true; + break; + } + } + return HDF_SUCCESS; +} + +int32_t PreparedModelService::SetInputs(const std::vector& inputs) +{ + if (inputs.size() != m_inputs.size()) { + HDF_LOGE("inputs size is invalid. 
expect: %zu, actual: %zu", m_inputs.size(), inputs.size()); + return HDF_ERR_INVALID_PARAM; + } + for (auto& ash : m_inputAshmems) { + ash->UnmapAshmem(); + ash->CloseAshmem(); + } + m_inputAshmems.clear(); + + int32_t ret {0}; + size_t inputSize = m_inputs.size(); + std::vector> tmpAllDims; + for (size_t i = 0; i < inputSize; i++) { + auto& input = inputs[i]; + auto& msInput = m_inputs[i]; + ret = CompareTensor(input, msInput); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Inputs tensor is not match that of model. Please check input tensor."); + return ret; + } + tmpAllDims.emplace_back(input.dimensions.begin(), input.dimensions.end()); + } + + if (m_isDynamicShape) { + auto msRet = m_model->Resize(m_inputs, tmpAllDims); + if (msRet != mindspore::kSuccess) { + HDF_LOGE("Resize for dynamic inputs failed."); + return HDF_FAILURE; + } + ret = GetMSInputsAndOutputs(); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Get ms inputs or outputs failed after resize."); + return ret; + } + } + + for (size_t i = 0; i < inputSize; i++) { + auto& input = inputs[i]; + auto& msInput = m_inputs[i]; + sptr ashptr = ParseBuffer(input.data); + if (ashptr == nullptr) { + HDF_LOGE("Parse %zuth input data failed.", i); + return HDF_ERR_INVALID_PARAM; + } + + auto data = const_cast(ashptr->ReadFromAshmem(input.data.dataSize, 0)); + msInput.SetData(data); + m_inputAshmems.emplace_back(ashptr); + } + return HDF_SUCCESS; +} + +int32_t PreparedModelService::SetOutputs(const std::vector& outputs) +{ + HDF_LOGI("Start Set outputs, m_outputs size=%zu", m_outputs.size()); + if (outputs.size() != m_outputs.size()) { + HDF_LOGE("outputs size is invalid. expect: %{public}zu, actual: %{public}zu", m_outputs.size(), outputs.size()); + return HDF_ERR_INVALID_PARAM; + } + for (auto ash : m_outputAshmems) { + ash->UnmapAshmem(); + ash->CloseAshmem(); + } + m_outputAshmems.clear(); + + for (size_t i = 0; i < m_outputs.size(); i++) { + auto& output = outputs[i]; + auto& msOutput = m_outputs[i]; + + sptr ashptr = ParseBuffer(output.data); + if (ashptr == nullptr) { + HDF_LOGE("Parse %{public}zu th output data failed.", i); + return HDF_ERR_INVALID_PARAM; + } + + auto data = const_cast(ashptr->ReadFromAshmem(output.data.dataSize, 0)); + msOutput.SetAllocator(nullptr); + msOutput.SetData(data); + m_outputAshmems.emplace_back(ashptr); + } + return HDF_SUCCESS; +} + +int32_t PreparedModelService::GetMSInputsAndOutputs() +{ + m_inputs = m_model->GetInputs(); + if (m_inputs.empty()) { + HDF_LOGE("Get inputs failed."); + return HDF_FAILURE; + } + + m_outputs = m_model->GetOutputs(); + if (m_outputs.empty()) { + HDF_LOGE("Get outputs failed."); + return HDF_FAILURE; + } + return HDF_SUCCESS; +} + +int32_t PreparedModelService::CompareTensor(const IOTensor& tensor, const mindspore::MSTensor& msTensor) +{ + auto dataType = static_cast(msTensor.DataType()); + if (tensor.dataType != dataType) { + HDF_LOGE("Data type of tensor is not match that of model."); + return HDF_ERR_INVALID_PARAM; + } + + auto format = static_cast(msTensor.format()); + if (tensor.format != format) { + HDF_LOGE("Format of tensor is not match that of model."); + return HDF_ERR_INVALID_PARAM; + } + + for (size_t i = 0; i < tensor.dimensions.size(); i++) { + if (msTensor.Shape()[i] != DYNAMIC_SHAPE_FLAG && tensor.dimensions[i] != msTensor.Shape()[i]) { + HDF_LOGE("The Shape of tensor is not match that of model."); + return HDF_ERR_INVALID_PARAM; + } + } + + return HDF_SUCCESS; +} + +sptr PreparedModelService::ParseBuffer(const SharedBuffer& buffer) +{ + if (buffer.fd == -1) { + 
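+        // An fd of -1 (INVALID_FD) marks an empty SharedBuffer, so there is nothing to map.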
HDF_LOGE("Invalid buffer fd, it cannot be -1."); + return nullptr; + } + + HDF_LOGW("NNRT buffer fd=%{public}d, length=%{public}u", buffer.fd, buffer.dataSize); + + sptr ashptr = new (std::nothrow) Ashmem(buffer.fd, buffer.bufferSize); + if (ashptr == nullptr) { + HDF_LOGE("Create shared memory failed."); + return nullptr; + } + + if (!ashptr->MapReadAndWriteAshmem()) { + HDF_LOGE("Map buffer fd to address failed."); + return nullptr; + } + + const void* data = ashptr->ReadFromAshmem(buffer.dataSize, buffer.offset); + if (data == nullptr) { + HDF_LOGE("Get data address failed."); + ashptr->UnmapAshmem(); + ashptr->CloseAshmem(); + return nullptr; + } + return ashptr; +} +} // V1_0 +} // Nnrt +} // HDI +} // OHOS diff --git a/example/drivers/nnrt/hdi_cpu_service/src/shared_buffer_parser.cpp b/example/drivers/nnrt/hdi_cpu_service/src/shared_buffer_parser.cpp new file mode 100644 index 0000000..19f5aa5 --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/src/shared_buffer_parser.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef OHOS_HDI_NNR_V1_0_UTILS_H +#define OHOS_HDI_NNR_V1_0_UTILS_H + +#include "shared_buffer_parser.h" + +#include +#include "ashmem.h" +#include "v1_0/nnrt_types.h" +#include "utils/hdf_log.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +SharedBufferParser::~SharedBufferParser() +{ + if (m_ashptr != nullptr) { + m_ashptr->UnmapAshmem(); + m_ashptr->CloseAshmem(); + m_bufferAddr = nullptr; + } +} + +int32_t SharedBufferParser::Init(const std::string& name, int32_t size) +{ + HDF_LOGI("Init SharedBufferParser from name and size."); + sptr ashptr = Ashmem::CreateAshmem(name.c_str(), size); + if (ashptr == nullptr) { + HDF_LOGE("Create ashmen from size failed."); + return HDF_FAILURE; + } + + SharedBuffer buffer; + buffer.fd = ashptr->GetAshmemFd(); + buffer.bufferSize = ashptr->GetAshmemSize(); + buffer.offset = 0; + buffer.dataSize = size; + + auto ret = Init(buffer); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Init SharedBufferParser failed."); + return ret; + } + return HDF_SUCCESS; +} + +int32_t SharedBufferParser::Init(const SharedBuffer& buffer) +{ + if (buffer.fd == INVALID_FD) { + HDF_LOGE("Invalid buffer fd, it cannot be %{public}d.", INVALID_FD); + return HDF_ERR_INVALID_PARAM; + } + + m_ashptr = new (std::nothrow) Ashmem(buffer.fd, buffer.bufferSize); + if (m_ashptr == nullptr) { + HDF_LOGE("Create ashmem failed."); + return HDF_FAILURE; + } + + if (!m_ashptr->MapReadAndWriteAshmem()) { + HDF_LOGE("Map buffer fd to address failed."); + return HDF_FAILURE; + } + + auto bufferAddr = m_ashptr->ReadFromAshmem(buffer.dataSize, buffer.offset); + if (bufferAddr == nullptr) { + HDF_LOGE("Invalid dataSize or offset of SharedBuffer."); + return HDF_ERR_INVALID_PARAM; + } + m_bufferAddr = const_cast(bufferAddr); + + m_buffer = buffer; + return HDF_SUCCESS; +} + +void* SharedBufferParser::GetBufferPtr() +{ + return m_bufferAddr; +} + +SharedBuffer 
SharedBufferParser::GetBuffer() +{ + return m_buffer; +} +} // V1_0 +} // Nnrt +} // HDI +} // OHOS +#endif // OHOS_HDI_NNR_V1_0_UTILS_H \ No newline at end of file diff --git a/example/drivers/nnrt/hdi_cpu_service/src/validation.cpp b/example/drivers/nnrt/hdi_cpu_service/src/validation.cpp new file mode 100644 index 0000000..e640c0f --- /dev/null +++ b/example/drivers/nnrt/hdi_cpu_service/src/validation.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "validation.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +int32_t ValidatePerformanceMode(PerformanceMode mode) +{ + if (mode < PerformanceMode::PERFORMANCE_NONE || mode > PerformanceMode::PERFORMANCE_EXTREME) { + return false; + } + + return true; +} + +int32_t ValidatePriority(Priority priority) +{ + if (priority < Priority::PRIORITY_NONE || priority > Priority::PRIORITY_HIGH) { + return false; + } + + return true; +} + +int32_t ValidateDataType(DataType dataType) +{ + if (dataType < DataType::DATA_TYPE_UNKNOWN || dataType > DataType::DATA_TYPE_FLOAT64) { + return false; + } + + if (dataType > DataType::DATA_TYPE_UNKNOWN && dataType < DataType::DATA_TYPE_BOOL) { + return false; + } + + if (dataType > DataType::DATA_TYPE_BOOL && dataType < DataType::DATA_TYPE_INT8) { + return false; + } + + if (dataType > DataType::DATA_TYPE_UINT64 && dataType < DataType::DATA_TYPE_FLOAT16) { + return false; + } + + return true; +} + +int32_t ValidateFormat(Format format) +{ + if (format < Format::FORMAT_NONE || format > Format::FORMAT_NHWC) { + return false; + } + + return true; +} +} // namespace V1_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/BUILD.gn b/frameworks/BUILD.gn new file mode 100644 index 0000000..34b7da3 --- /dev/null +++ b/frameworks/BUILD.gn @@ -0,0 +1,133 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
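+# Builds libneural_network_runtime, the NNRt framework library: the core runtime
+# sources below plus one ops/*_builder.cpp per supported operator.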
+ +import("//build/ohos.gni") + +config("nnrt_config") { + cflags_cc = [ "-fexceptions" ] +} + +nnrt_sources = [ + "native/device_manager.cpp", + "native/device_registrar.cpp", + "native/hdi_device.cpp", + "native/hdi_prepared_model.cpp", + "native/memory_manager.cpp", + "native/transform.cpp", + "native/nn_tensor.cpp", + "native/validation.cpp", + "native/inner_model.cpp", + "native/compilation.cpp", + "native/execution_plan.cpp", + "native/executor.cpp", + "native/neural_network_runtime.cpp", + "native/ops_builder.cpp", + "native/ops_registry.cpp", +] + +ops_sources = [ + "native/ops/add_builder.cpp", + "native/ops/argmax_builder.cpp", + "native/ops/avgpool_builder.cpp", + "native/ops/pooling_builder.cpp", + "native/ops/batch_to_space_nd_builder.cpp", + "native/ops/bias_add_builder.cpp", + "native/ops/cast_builder.cpp", + "native/ops/concat_builder.cpp", + "native/ops/conv2d_builder.cpp", + "native/ops/conv2d_transpose_builder.cpp", + "native/ops/depthwise_conv2d_native_builder.cpp", + "native/ops/div_builder.cpp", + "native/ops/eltwise_builder.cpp", + "native/ops/expandims_builder.cpp", + "native/ops/fullconnection_builder.cpp", + "native/ops/maxpool_builder.cpp", + "native/ops/slice_builder.cpp", + "native/ops/softmax_builder.cpp", + "native/ops/space_to_batch_nd_builder.cpp", + "native/ops/split_builder.cpp", + "native/ops/sqrt_builder.cpp", + "native/ops/squared_difference_builder.cpp", + "native/ops/squeeze_builder.cpp", + "native/ops/stack_builder.cpp", + "native/ops/strided_slice_builder.cpp", + "native/ops/sub_builder.cpp", + "native/ops/tanh_builder.cpp", + "native/ops/tile_builder.cpp", + "native/ops/top_k_builder.cpp", + "native/ops/transpose_builder.cpp", + "native/ops/unsqueeze_builder.cpp", + "native/ops/batchnorm_builder.cpp", + "native/ops/fill_builder.cpp", + "native/ops/matmul_builder.cpp", + "native/ops/gather_builder.cpp", + "native/ops/gelu_builder.cpp", + "native/ops/hswish_builder.cpp", + "native/ops/layernorm_builder.cpp", + "native/ops/maximum_builder.cpp", + "native/ops/lessequal_builder.cpp", + "native/ops/mul_builder.cpp", + "native/ops/onehot_builder.cpp", + "native/ops/pad_builder.cpp", + "native/ops/pow_builder.cpp", + "native/ops/prelu_builder.cpp", + "native/ops/quant_dtype_cast_builder.cpp", + "native/ops/reduceall_builder.cpp", + "native/ops/reducemean_builder.cpp", + "native/ops/reduceprod_builder.cpp", + "native/ops/relu_builder.cpp", + "native/ops/relu6_builder.cpp", + "native/ops/reshape_builder.cpp", + "native/ops/resize_bilinear_builder.cpp", + "native/ops/rsqrt_builder.cpp", + "native/ops/scale_builder.cpp", + "native/ops/shape_builder.cpp", + "native/ops/sigmoid_builder.cpp", +] + +ohos_shared_library("libneural_network_runtime") { + sources = nnrt_sources + sources += ops_sources + include_dirs = [ + "//commonlibrary/c_utils/base/include", + "//drivers/hdf_core/adapter/uhdf/posix/include", + "//drivers/hdf_core/adapter/uhdf2/include/hdi", + "//drivers/hdf_core/adapter/uhdf2/ipc/include", + "//drivers/hdf_core/framework/include/core", + "//drivers/hdf_core/framework/include/utils", + "//drivers/hdf_core/framework/core/common/include/host", + "//foundation/ai/neural_network_runtime", + "//foundation/communication/ipc/interfaces/innerkits/ipc_core/include", + "//third_party/googletest/googletest/include/gtest", + "//third_party/mindspore/mindspore/lite/mindir/include" + ] + + install_images = [ + "system", + "updater" + ] + + public_configs = [ ":nnrt_config" ] + + external_deps = [ + "hilog_native:libhilog", + "hitrace_native:libhitracechain", + 
"c_utils:utils", + "hdf_core:libhdf_utils", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] + + subsystem_name = "ai" + part_name = "neural_network_runtime" +} diff --git a/frameworks/native/compilation.cpp b/frameworks/native/compilation.cpp new file mode 100644 index 0000000..ed6e737 --- /dev/null +++ b/frameworks/native/compilation.cpp @@ -0,0 +1,714 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "compilation.h" + +#include +#include +#include +#include +#include +#include + +#include "common/utils.h" +#include "common/scoped_trace.h" +#include "validation.h" +#include "device_manager.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +constexpr int MAX_MODEL_SIZE = 200 * 1024 * 1024; // 200MB +constexpr int OCT_UNIT = 8; +constexpr int NULL_PTR_LENGTH = 0; +constexpr int NUMBER_CACHE_INFO_MEMBERS = 3; + +// CRC16 Table is created based on the Polynomial of G(x) = x^16 + x^12 + x^15 + 1 and +// CRC register initialization value of "0" (0x0000) +static const unsigned short CRC16_TAB[256] = { + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 
0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0 +}; + +Compilation::Compilation(const InnerModel* innerModel) + : m_liteGraph(innerModel->GetLiteGraphs()), + m_inputTensors(innerModel->GetInputTensors()), + m_outputTensors(innerModel->GetOutputTensors()) {} + +OH_NN_ReturnCode Compilation::SetDevice(size_t deviceId) +{ + if (m_isBuild) { + LOGE("Cannot set deviceId after compilation finish."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto& deviceManager = DeviceManager::GetInstance(); + std::shared_ptr availableDevice = deviceManager.GetDevice(deviceId); + if (availableDevice == nullptr) { + LOGE("[Compilation] DeviceId does not exist, deviceId=%zu", deviceId); + return OH_NN_INVALID_PARAMETER; + } + + std::vector supportedList; + OH_NN_ReturnCode ret = availableDevice->GetSupportedOperation(m_liteGraph, supportedList); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] SetDevice failed, error happened when getting supported operation."); + return ret; + } + + for (bool isSupport : supportedList) { + if (!isSupport) { + LOGE("[Compilation] SetDevice failed, current device not support the model, device id: %zu.", deviceId); + return OH_NN_FAILED; + } + } + + bool supportDynamic; + ret = availableDevice->IsDynamicInputSupported(supportDynamic); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] SetDevice failed, error happened when checking whether device supports dynamic input."); + return ret; + } + + if (IsDynamicShape() && (!supportDynamic)) { + LOGE("[Compilation] SetDevice failed." + "The device does not support dynamic shape inputs, but the model has dynamic inputs."); + return OH_NN_FAILED; + } + + m_device = availableDevice; + m_deviceId = deviceId; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::SetCacheDir(const std::string& cacheModelPath, uint32_t version) +{ + if (m_isBuild) { + LOGE("Cannot set cache after compilation finish."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (m_device == nullptr) { + LOGE("The parameter of m_device is nullptr, please call SetDevice function before calling SetCacheDir."); + return OH_NN_OPERATION_FORBIDDEN; + } + + bool isSupportedCache {false}; + OH_NN_ReturnCode ret = m_device->IsModelCacheSupported(isSupportedCache); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Fail to query whether the device is available to save cache model."); + return ret; + } + + if (!isSupportedCache) { + LOGE("[Compilation] The device is unavailable to save cache model."); + return OH_NN_OPERATION_FORBIDDEN; + } + + char realPathRes[PATH_MAX]; + const char* filePath = realpath(cacheModelPath.c_str(), realPathRes); + if (filePath == nullptr) { + LOGE("[Compilation] The cache model path is invalid."); + return OH_NN_INVALID_PARAMETER; + } + + struct stat fileInfo; + if (stat(filePath, &fileInfo) != 0) { + LOGE("[Compilation] The cache directory does not exist or cannot be accessed."); + return OH_NN_INVALID_PARAMETER; + } + + if (!(fileInfo.st_mode & S_IFDIR)) { + LOGE("[Compilation] The cache model path is not a directory."); + return OH_NN_INVALID_PARAMETER; + } + + m_cachePath = (std::string)filePath + "/"; + m_version = version; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::SetPerformance(OH_NN_PerformanceMode performance) +{ + if (m_isBuild) { + LOGE("[Compilation] Cannot set performance after compilation finish."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (m_device == nullptr) { + 
LOGE("Cannot set performance before set device, please set device first"); + return OH_NN_OPERATION_FORBIDDEN; + } + + bool isSupportedPerformance {false}; + OH_NN_ReturnCode ret = m_device->IsPerformanceModeSupported(isSupportedPerformance); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Call device %zu failed.", m_deviceId); + return ret; + } + + if (!isSupportedPerformance) { + LOGE("[Compilation] This device %zu is not support performance setting.", m_deviceId); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (!Validation::ValidatePerformanceMode(performance)) { + LOGE("[Compilation] SetPerformance passed invalid performance=%d", performance); + return OH_NN_INVALID_PARAMETER; + } + + m_performance = performance; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::SetPriority(OH_NN_Priority priority) +{ + if (m_isBuild) { + LOGE("[Compilation] Cannot set priority after compilation finish."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (m_device == nullptr) { + LOGE("Cannot set priority before set device, please set device first"); + return OH_NN_OPERATION_FORBIDDEN; + } + + bool isSupportedPriority {false}; + OH_NN_ReturnCode ret = m_device->IsPrioritySupported(isSupportedPriority); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Call device %zu failed.", m_deviceId); + return ret; + } + + if (!isSupportedPriority) { + LOGE("[Compilation] This device %zu is not support priority setting.", m_deviceId); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (!Validation::ValidatePriority(priority)) { + LOGE("[Compilation] SetPriority passed invalid priority=%d", priority); + return OH_NN_INVALID_PARAMETER; + } + + m_priority = priority; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::SetEnableFp16(bool isFp16) +{ + if (m_isBuild) { + LOGE("[Compilation] Cannot enable float16 after compilation finish."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (m_device == nullptr) { + LOGE("Cannot set enable fp16 before set device, please set device first"); + return OH_NN_OPERATION_FORBIDDEN; + } + + bool isSupportedFp16 {false}; + OH_NN_ReturnCode ret = m_device->IsFloat16PrecisionSupported(isSupportedFp16); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Call device %zu failed.", m_deviceId); + return ret; + } + + if (!isSupportedFp16) { + LOGE("[Compilation] This device %zu is not support float16 precision setting.", m_deviceId); + return OH_NN_OPERATION_FORBIDDEN; + } + + m_enableFp16 = isFp16; + return OH_NN_SUCCESS; +} + +unsigned short Compilation::GetCrc16(const unsigned char* buffer, size_t length) const +{ + unsigned short crc16 = 0; + for (size_t i = 0; i < length; ++i) { + uint8_t tableIndex = ((crc16 >> OCT_UNIT) ^ *buffer++) & 0x00ff; + crc16 = (crc16 << OCT_UNIT) ^ CRC16_TAB[tableIndex]; + } + return crc16; +} + +OH_NN_ReturnCode Compilation::GenerateCacheInfo(uint32_t cacheSize, std::unique_ptr& cacheInfo) const +{ + std::string cacheInfoPath = m_cachePath + "cache_info.nncache"; + std::ofstream cacheInfoStream(cacheInfoPath, std::ios::binary | std::ios::out | std::ios::trunc); + if (cacheInfoStream.fail()) { + LOGE("[Compilation] Model cache info file is invalid."); + return OH_NN_INVALID_FILE; + } + + if (!cacheInfoStream.write(reinterpret_cast(cacheInfo.get()), cacheSize)) { + LOGE("[Compilation] Fail to write cache info."); + cacheInfoStream.close(); + return OH_NN_FAILED; + } + + cacheInfoStream.close(); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::GenerateCacheModel(size_t cacheNumber, std::unique_ptr& cacheInfo, + std::vector 
modelBuffer) const +{ + auto cacheInfoPtr = cacheInfo.get(); + *cacheInfoPtr++ = static_cast(cacheNumber); + *cacheInfoPtr++ = static_cast(m_version); + *cacheInfoPtr++ = static_cast(m_deviceId); + for (uint32_t i = 0; i < cacheNumber; ++i) { + std::string cacheModelFile = m_cachePath + std::to_string(i) + ".nncache"; + std::ofstream cacheModelStream(cacheModelFile, std::ios::binary | std::ios::out | std::ios::trunc); + if (cacheModelStream.fail()) { + LOGE("[Compilation] Model cache file is invalid."); + return OH_NN_INVALID_FILE; + } + + uint64_t checkSum = static_cast(GetCrc16(static_cast(modelBuffer[i].buffer), + modelBuffer[i].length)); + *cacheInfoPtr++ = checkSum; + if (!cacheModelStream.write(static_cast(modelBuffer[i].buffer), modelBuffer[i].length)) { + LOGE("[Compilation] Fail to write cache model."); + cacheModelStream.close(); + return OH_NN_FAILED; + }; + + cacheModelStream.close(); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::GenerateCacheFiles(const std::vector& modelBuffer) const +{ + const size_t cacheNumber = modelBuffer.size(); + uint32_t cacheSize = NUMBER_CACHE_INFO_MEMBERS + cacheNumber; + std::unique_ptr cacheInfo = std::make_unique(cacheSize); + if (cacheInfo == nullptr) { + LOGE("Fail to create cacheInfo instance."); + return OH_NN_MEMORY_ERROR; + } + + OH_NN_ReturnCode ret = GenerateCacheModel(cacheNumber, cacheInfo, modelBuffer); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + uint32_t infoCharNumber = cacheSize * sizeof(uint64_t); + ret = GenerateCacheInfo(infoCharNumber, cacheInfo); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::GetCacheFileLength(std::ifstream& ifs, int& fsize) const +{ + ifs.seekg(0, std::ios::end); + if (!ifs.good()) { + LOGE("[Compilation] Fail to set the position of the next character to be extracted from the input stream."); + return OH_NN_INVALID_FILE; + } + + int handleValue = ifs.tellg(); + if (handleValue == -1) { + LOGE("[Compilation] Unable to get position of the input stream."); + return OH_NN_INVALID_FILE; + } + + if ((handleValue > MAX_MODEL_SIZE) || (handleValue == NULL_PTR_LENGTH)) { + LOGE("[Compilation] Unable to read huge or empty input stream, get cache file size=%d", handleValue); + return OH_NN_INVALID_FILE; + } + + fsize = handleValue; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::ReadCacheModelFile(const std::string& file, ModelBuffer& modelBuffer) const +{ + std::ifstream ifs(file.c_str(), std::ios::in | std::ios::binary); + if (!ifs) { + LOGE("[Compilation] Fail to open cache file."); + return OH_NN_INVALID_FILE; + } + + int fsize {-1}; + OH_NN_ReturnCode ret = GetCacheFileLength(ifs, fsize); + if (ret != OH_NN_SUCCESS) { + ifs.close(); + return ret; + } + + ifs.seekg(0, std::ios::beg); + if (!ifs.good()) { + LOGE("[Compilation] Fail to set the position of the next character to be extracted" + "from the cache model stream."); + ifs.close(); + return OH_NN_FAILED; + } + + char* ptr = static_cast(m_device->AllocateBuffer(fsize)); + if (ptr == nullptr) { + LOGE("[Compilation] Fail to create file buffer."); + ifs.close(); + return OH_NN_NULL_PTR; + } + + ifs.read(ptr, fsize); + if (!ifs.good()) { + LOGE("[Compilation] Fail to read the characters from the cache model stream."); + ifs.close(); + m_device->ReleaseBuffer(ptr); + ptr = nullptr; + return OH_NN_FAILED; + } + + ifs.close(); + modelBuffer.buffer = ptr; + modelBuffer.length = fsize; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode 
Compilation::CheckCacheInfo(ModelCacheInfo& modelCacheInfo, const std::string& cacheInfoPath) const +{ + std::ifstream infoCacheFile(cacheInfoPath.c_str(), std::ios::in | std::ios::binary); + if (!infoCacheFile) { + LOGE("[Compilation] Openning cache info file failed."); + return OH_NN_INVALID_FILE; + } + + int charNumber = NUMBER_CACHE_INFO_MEMBERS * sizeof(uint64_t); + if (!infoCacheFile.read((char*)&(modelCacheInfo), charNumber)) { + LOGE("[Compilation] Fail to get the content of info cache file."); + infoCacheFile.close(); + return OH_NN_INVALID_FILE; + } + + // modelCacheInfo.deviceId type is int64_t, + // it is transformed from size_t value, so the transform here will not truncate value. + size_t deviceId = static_cast(modelCacheInfo.deviceId); + if (deviceId != m_deviceId) { + LOGE("[Compilation] The deviceId=%zu in the cache files is different from current deviceId=%zu," + "please change the cache directory or current deviceId.", deviceId, m_deviceId); + infoCacheFile.close(); + return OH_NN_INVALID_PARAMETER; + } + + std::vector modelCheckSum; + modelCheckSum.resize(modelCacheInfo.fileNumber); + modelCacheInfo.modelCheckSum.resize(modelCacheInfo.fileNumber); + if (!infoCacheFile.read((char*)&modelCheckSum[0], modelCacheInfo.fileNumber * sizeof(uint64_t))) { + LOGE("[Compilation] The info cache file has been changed."); + infoCacheFile.close(); + return OH_NN_INVALID_FILE; + } + + for (uint32_t i = 0; i < modelCacheInfo.fileNumber; ++i) { + modelCacheInfo.modelCheckSum[i] = static_cast(modelCheckSum[i]); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::RemoveCacheFiles(uint32_t fileNumber) const +{ + std::string cacheInfoPath = m_cachePath + "cache_info.nncache"; + if (remove(cacheInfoPath.c_str()) == -1) { + LOGE("[Compilation] Fail to remove the file %s, please delete the file manually.", cacheInfoPath.c_str()); + return OH_NN_FAILED; + } + LOGI("[Compilation] Succeed to remove the file cache_info.nncach."); + + for (uint32_t i = 0; i < fileNumber; ++i) { + std::string fileName = std::to_string(i) + ".nncache"; + std::string cacheModelPath = m_cachePath + fileName; + if (access(cacheModelPath.c_str(), 0) != 0) { + LOGW("[Compilation] The file %s does not exist, no need to delete the file.", cacheModelPath.c_str()); + continue; + } + + if (remove(cacheModelPath.c_str()) == -1) { + LOGE("[Compilation] Fail to remove the file %s, please delete the file manually.", cacheModelPath.c_str()); + return OH_NN_FAILED; + } + LOGI("[Compilation] Succeed to remove the file %s", cacheModelPath.c_str()); + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::CheckCacheModel(const ModelCacheInfo& modelCacheInfo, + std::vector& modelBuffers) const +{ + for (uint32_t i = 0; i < modelCacheInfo.fileNumber; ++i) { + std::string cacheModelPath = m_cachePath + std::to_string(i) + ".nncache"; + if (access(cacheModelPath.c_str(), 0) != 0) { + LOGE("[Compilation] The cache model file %s does not exist.", cacheModelPath.c_str()); + return OH_NN_INVALID_FILE; + } + + ModelBuffer modelBuffer; + OH_NN_ReturnCode ret = ReadCacheModelFile(cacheModelPath, modelBuffer); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Read cache model file failed."); + return ret; + } + + if (GetCrc16(static_cast(modelBuffer.buffer), + modelBuffer.length) != modelCacheInfo.modelCheckSum[i]) { + LOGE("[Compilation] The cache model file %s has been changed.", cacheModelPath.c_str()); + return OH_NN_INVALID_FILE; + } + + modelBuffers.emplace_back(std::move(modelBuffer)); + } + + return OH_NN_SUCCESS; +} 
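Taken together, GenerateCacheModel() and CheckCacheInfo() imply a simple on-disk cache layout: cache_info.nncache stores three uint64_t header words (file number, version, device id) followed by one uint64_t CRC-16 checksum per "<index>.nncache" model file, where the checksum uses the table above (polynomial 0x1021, zero initial value, MSB first). The standalone sketch below reads that header and shows the untabled form of GetCrc16(); the helper names Crc16OfBuffer and ReadCacheInfoHeader are illustrative, not part of the runtime.

#include <cstddef>
#include <cstdint>
#include <fstream>
#include <string>
#include <vector>

// Bitwise equivalent of the table-driven GetCrc16() above: CRC-16, poly 0x1021, init 0.
static uint16_t Crc16OfBuffer(const unsigned char* buffer, size_t length)
{
    uint16_t crc = 0;
    for (size_t i = 0; i < length; ++i) {
        crc ^= static_cast<uint16_t>(buffer[i]) << 8;
        for (int bit = 0; bit < 8; ++bit) {
            crc = (crc & 0x8000) ? static_cast<uint16_t>((crc << 1) ^ 0x1021)
                                 : static_cast<uint16_t>(crc << 1);
        }
    }
    return crc;
}

// Reads "<cacheDir>/cache_info.nncache": [fileNumber, version, deviceId, checksum 0..N-1].
static bool ReadCacheInfoHeader(const std::string& cacheDir, uint64_t header[3],
                                std::vector<uint64_t>& checkSums)
{
    std::ifstream ifs(cacheDir + "/cache_info.nncache", std::ios::binary);
    if (!ifs.read(reinterpret_cast<char*>(header), 3 * sizeof(uint64_t))) {
        return false;
    }
    if (header[0] == 0) {
        return true;
    }
    checkSums.resize(header[0]);
    return static_cast<bool>(
        ifs.read(reinterpret_cast<char*>(checkSums.data()), header[0] * sizeof(uint64_t)));
}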
+ +OH_NN_ReturnCode Compilation::NormalBuild(std::shared_ptr& preparedModel) +{ + ModelConfig config {m_enableFp16, m_performance, m_priority}; + OH_NN_ReturnCode ret = m_device->PrepareModel(m_liteGraph, config, preparedModel); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Preparing model failed when normally building."); + return ret; + } + + m_executionPlan = CreateSharedPtr(preparedModel, m_device); + if (m_executionPlan == nullptr) { + LOGE("Fail to create ExecutionPlan instance."); + return OH_NN_MEMORY_ERROR; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::GenCacheBuild(std::shared_ptr& preparedModel) +{ + OH_NN_ReturnCode ret = NormalBuild(preparedModel); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Preparing model failed when generating cache."); + return ret; + } + + std::vector modelBuffers; + ret = preparedModel->ExportModelCache(modelBuffers); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Export model cache failed."); + return ret; + } + + ret = GenerateCacheFiles(modelBuffers); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Generate cache files failed."); + return ret; + } + + LOGI("[Compilation] Export model cache successfully."); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::ReGenCacheBuild(uint32_t fileNumber, std::shared_ptr& preparedModel) +{ + OH_NN_ReturnCode ret = RemoveCacheFiles(fileNumber); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + ret = GenCacheBuild(preparedModel); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Generating cache building failed."); + return ret; + } + + LOGI("[Compilation] Update model cache successfully."); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::LoadCacheBuild(std::shared_ptr& preparedModel, + const ModelCacheInfo& cacheInfo) +{ + std::vector modelBuffers; + OH_NN_ReturnCode ret = CheckCacheModel(cacheInfo, modelBuffers); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Checking cache model failed."); + for (size_t i = 0; i < modelBuffers.size(); ++i) { + m_device->ReleaseBuffer(modelBuffers[i].buffer); + modelBuffers[i].buffer = nullptr; + modelBuffers[i].length = 0; + } + return ret; + } + + ModelConfig config {m_enableFp16, m_performance, m_priority}; + ret = m_device->PrepareModelFromModelCache(modelBuffers, config, preparedModel); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Preparing model from cache failed."); + return ret; + } + + LOGI("[Compilation] Load cache successfully."); + + m_executionPlan = CreateSharedPtr(preparedModel, m_device); + if (m_executionPlan == nullptr) { + LOGE("Fail to create ExecutionPlan instance."); + return OH_NN_MEMORY_ERROR; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::InnerBuild() +{ + OH_NN_ReturnCode ret; + std::shared_ptr preparedModel; + if (m_cachePath.empty()) { + ret = NormalBuild(preparedModel); + if (ret != OH_NN_SUCCESS) { + LOGE("Fail to normally build."); + return ret; + } + + m_isBuild = true; + return OH_NN_SUCCESS; + } + + std::string cacheInfoPath = m_cachePath + "cache_info.nncache"; + if (access(cacheInfoPath.c_str(), 0) != 0) { + ret = GenCacheBuild(preparedModel); + if (ret != OH_NN_SUCCESS) { + LOGE("Fail to build in generating cache mode."); + return ret; + } + + m_isBuild = true; + return OH_NN_SUCCESS; + } + + ModelCacheInfo cacheInfo; + ret = CheckCacheInfo(cacheInfo, cacheInfoPath); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + if (m_version > cacheInfo.version) { + ret = ReGenCacheBuild(cacheInfo.fileNumber, preparedModel); + if (ret != OH_NN_SUCCESS) { + 
return ret; + } + + m_isBuild = true; + return OH_NN_SUCCESS; + } + + if (m_version < cacheInfo.version) { + LOGE("[Compilation] The current version is lower than the cache files, please set a higher version."); + return OH_NN_OPERATION_FORBIDDEN; + } + + ret = LoadCacheBuild(preparedModel, cacheInfo); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + m_isBuild = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::Build() +{ + NNRT_TRACE_NAME("Compilation"); + if (m_isBuild) { + LOGE("[Compilation] Cannot enable float16 after compilation finish."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (m_device == nullptr) { + LOGE("The parameter of m_device is nullptr, please call SetDevice function before build model."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode ret = InnerBuild(); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + return OH_NN_SUCCESS; +} + +std::shared_ptr Compilation::GetExecutionPlan() const +{ + return m_executionPlan; +} + +std::vector> Compilation::GetInputTensors() const +{ + return m_inputTensors; +} + +std::vector> Compilation::GetOutputTensors() const +{ + return m_outputTensors; +} + +bool Compilation::IsBuild() const +{ + return m_isBuild; +} + +bool Compilation::IsDynamicShape() const +{ + for (size_t i = 0; i < m_inputTensors.size(); ++i) { + if (m_inputTensors[i]->IsDynamicShape()) { + return true; + } + } + return false; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/compilation.h b/frameworks/native/compilation.h new file mode 100644 index 0000000..0c76ef3 --- /dev/null +++ b/frameworks/native/compilation.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
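InnerBuild() above picks one of three paths: it builds directly when no cache directory is set, generates cache files when cache_info.nncache is missing or the requested version is newer than the cached one, and loads the cache when the versions match (an older requested version is rejected). A minimal usage sketch follows, assuming an InnerModel that has been constructed and finished elsewhere; the cache path "/data/nnrt_cache" and version 1 are placeholder values.

#include "compilation.h"

OH_NN_ReturnCode BuildWithCache(const OHOS::NeuralNetworkRuntime::InnerModel* model, size_t deviceId)
{
    OHOS::NeuralNetworkRuntime::Compilation compilation(model);

    OH_NN_ReturnCode ret = compilation.SetDevice(deviceId);   // must be called before the other setters
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }

    // Optional: with a cache directory set, Build() loads the cache when the versions match,
    // regenerates it when the requested version is newer, and rejects an older version.
    ret = compilation.SetCacheDir("/data/nnrt_cache", 1);
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }

    return compilation.Build();
}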
+ */ +#ifndef NEURAL_NETWORK_RUNTIME_COMPILATION_H +#define NEURAL_NETWORK_RUNTIME_COMPILATION_H + +#include "inner_model.h" +#include "execution_plan.h" + +#include "interfaces/oem/cpp_api/device.h" +#include "interfaces/oem/cpp_api/cpp_type.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +struct ModelCacheInfo { + uint64_t fileNumber = 0; + uint64_t version = 0; + uint64_t deviceId = 0; + std::vector modelCheckSum; +}; + +class Compilation { +public: + explicit Compilation(const InnerModel* innerModel); + + OH_NN_ReturnCode SetDevice(size_t deviceId); + OH_NN_ReturnCode SetCacheDir(const std::string& cacheModelPath, uint32_t version); + OH_NN_ReturnCode SetPerformance(OH_NN_PerformanceMode performance); + OH_NN_ReturnCode SetPriority(OH_NN_Priority priority); + OH_NN_ReturnCode SetEnableFp16(bool isFp16); + + OH_NN_ReturnCode Build(); + + bool IsBuild() const; + bool IsDynamicShape() const; + std::vector>GetInputTensors() const; + std::vector>GetOutputTensors() const; + std::shared_ptr GetExecutionPlan() const; + +private: + std::shared_ptr m_liteGraph {nullptr}; + OH_NN_Priority m_priority {OH_NN_PRIORITY_NONE}; + OH_NN_PerformanceMode m_performance {OH_NN_PERFORMANCE_NONE}; + bool m_enableFp16 {false}; + std::shared_ptr m_device {nullptr}; + std::string m_cachePath; + uint32_t m_version {0}; + size_t m_deviceId {0}; + bool m_isBuild {false}; + std::shared_ptr m_executionPlan {nullptr}; + std::vector> m_inputTensors; + std::vector> m_outputTensors; + +private: + OH_NN_ReturnCode GenerateCacheFiles(const std::vector& modelBuffer) const; + OH_NN_ReturnCode GenerateCacheModel(size_t cacheNumber, std::unique_ptr& cacheInfo, + std::vector modelBuffer) const; + OH_NN_ReturnCode GenerateCacheInfo(uint32_t cacheSize, std::unique_ptr& cacheInfo) const; + OH_NN_ReturnCode CheckCacheInfo(ModelCacheInfo& modelCacheInfo, const std::string& cacheInfoPath) const; + OH_NN_ReturnCode ReadCacheModelFile(const std::string& file, ModelBuffer& modelBuffer) const; + OH_NN_ReturnCode RemoveCacheFiles(uint32_t fileNumber) const; + unsigned short GetCrc16(const unsigned char* buffer, size_t length) const; + OH_NN_ReturnCode CheckCacheModel(const ModelCacheInfo& modelCacheInfo, + std::vector& modelBuffers) const; + OH_NN_ReturnCode NormalBuild(std::shared_ptr& preparedModel); + OH_NN_ReturnCode GenCacheBuild(std::shared_ptr& preparedModel); + OH_NN_ReturnCode ReGenCacheBuild(uint32_t fileNumber, std::shared_ptr& preparedModel); + OH_NN_ReturnCode LoadCacheBuild(std::shared_ptr& preparedModel, const ModelCacheInfo& cacheInfo); + OH_NN_ReturnCode InnerBuild(); + OH_NN_ReturnCode GetCacheFileLength(std::ifstream& ifs, int& fsize) const; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_COMPILATION_H \ No newline at end of file diff --git a/frameworks/native/device_manager.cpp b/frameworks/native/device_manager.cpp new file mode 100644 index 0000000..6ad79bb --- /dev/null +++ b/frameworks/native/device_manager.cpp @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "device_manager.h" + +#include "hdi_interfaces.h" +#include "hdi_device.h" +#include "common/log.h" +#include "common/utils.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +const std::vector& DeviceManager::GetAllDeviceId() +{ + m_tmpDeviceIds.clear(); + std::shared_ptr device {nullptr}; + for (auto iter = m_devices.begin(); iter != m_devices.end(); ++iter) { + device = iter->second; + if (!IsValidDevice(device)) { + continue; + } + m_tmpDeviceIds.emplace_back(iter->first); + } + return m_tmpDeviceIds; +} + +std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const +{ + auto iter = m_devices.find(deviceId); + if (iter == m_devices.end()) { + LOGE("DeviceId is not found, deviceId=%zu", deviceId); + return nullptr; + } + + return iter->second; +} + +const std::string& DeviceManager::GetDeviceName(size_t deviceId) +{ + m_tmpDeviceName.clear(); + auto iter = m_devices.find(deviceId); + if (iter == m_devices.end()) { + LOGE("DeviceId is not found, deviceId=%zu", deviceId); + return m_tmpDeviceName; + } + + std::string deviceName; + auto ret = iter->second->GetDeviceName(deviceName); + if (ret != OH_NN_SUCCESS) { + LOGE("Get device name failed."); + return m_tmpDeviceName; + } + + std::string vendorName; + ret = iter->second->GetVendorName(vendorName); + if (ret != OH_NN_SUCCESS) { + LOGE("Get vendor name failed."); + return m_tmpDeviceName; + } + + m_tmpDeviceName = GenUniqueName(deviceName, vendorName); + return m_tmpDeviceName; +} + +std::string DeviceManager::GenUniqueName(const std::string& deviceName, const std::string& vendorName) const +{ + return deviceName + "_" + vendorName; +} + +OH_NN_ReturnCode DeviceManager::RegisterDevice(std::function()> creator) +{ + auto regDevice = creator(); + if (regDevice == nullptr) { + LOGE("Cannot create device, register device failed."); + return OH_NN_INVALID_PARAMETER; + } + + if (!IsValidDevice(regDevice)) { + LOGE("Device is not avaliable."); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + std::string deviceName; + auto ret = regDevice->GetDeviceName(deviceName); + if (ret != OH_NN_SUCCESS) { + LOGE("Get device name failed."); + return ret; + } + + std::string vendorName; + ret = regDevice->GetVendorName(vendorName); + if (ret != OH_NN_SUCCESS) { + LOGE("Get vendor name failed."); + return ret; + } + + const std::lock_guard lock(m_mtx); + std::string uniqueName = GenUniqueName(deviceName, vendorName); + auto setResult = m_uniqueName.emplace(uniqueName); + if (!setResult.second) { + LOGE("Device already exists, cannot register again. deviceName=%s, vendorName=%s", + deviceName.c_str(), vendorName.c_str()); + return OH_NN_FAILED; + } + + m_devices.emplace(std::hash{}(uniqueName), regDevice); + return OH_NN_SUCCESS; +} + +void DeviceManager::DiscoverHDIDevices() +{ + // only one device from HDI now. + OHOS::sptr iDevice = V1_0::INnrtDevice::Get(); + if (iDevice == nullptr) { + LOGW("Get HDI device failed."); + return; + } + + std::string deviceName; + std::string vendorName; + auto hdiRet = iDevice->GetDeviceName(deviceName); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get device name failed. ErrorCode=%d", hdiRet); + return; + } + hdiRet = iDevice->GetVendorName(vendorName); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get vendor name failed. 
ErrorCode=%d", hdiRet); + return; + } + + std::string uniqueName = GenUniqueName(deviceName, vendorName); + const std::lock_guard lock(m_mtx); + auto setResult = m_uniqueName.emplace(uniqueName); + if (!setResult.second) { + LOGW("Device already exists, cannot register again. deviceName=%s, vendorName=%s", + deviceName.c_str(), vendorName.c_str()); + return; + } + + std::shared_ptr device = CreateSharedPtr(iDevice); + if (device == nullptr) { + LOGW("Failed to register device, because fail to create device instance."); + return; + } + m_devices.emplace(std::hash{}(uniqueName), device); +} + +bool DeviceManager::IsValidDevice(std::shared_ptr device) const +{ + DeviceStatus status {DeviceStatus::UNKNOWN}; + auto ret = device->GetDeviceStatus(status); + if (ret != OH_NN_SUCCESS || status == DeviceStatus::UNKNOWN || status == DeviceStatus::OFFLINE) { + return false; + } + return true; +} +} // NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/device_manager.h b/frameworks/native/device_manager.h new file mode 100644 index 0000000..1f15c36 --- /dev/null +++ b/frameworks/native/device_manager.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_DEVICE_MANAGER_H +#define NEURAL_NETWORK_RUNTIME_DEVICE_MANAGER_H + +#include +#include +#include +#include +#include +#include + +#include "interfaces/oem/cpp_api/device.h" +#include "interfaces/kits/c/neural_network_runtime_type.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +class DeviceManager { +public: + const std::vector& GetAllDeviceId(); + std::shared_ptr GetDevice(size_t deviceId) const; + const std::string& GetDeviceName(size_t deviceId); + + // register device from C++ API + OH_NN_ReturnCode RegisterDevice(std::function()> creator); + + static DeviceManager& GetInstance() + { + static DeviceManager instance; + instance.DiscoverHDIDevices(); + return instance; + } + +private: + DeviceManager() = default; + DeviceManager(const DeviceManager&) = delete; + DeviceManager& operator=(const DeviceManager&) = delete; + + void DiscoverHDIDevices(); + std::string GenUniqueName(const std::string& deviceName, const std::string& vendorName) const; + bool IsValidDevice(std::shared_ptr device) const; + +private: + std::unordered_set m_uniqueName; + // key is device id, it is the unique number. + std::unordered_map> m_devices; + std::mutex m_mtx; + + std::string m_tmpDeviceName; + std::vector m_tmpDeviceIds; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_DEVICE_MANAGER_H \ No newline at end of file diff --git a/frameworks/native/device_registrar.cpp b/frameworks/native/device_registrar.cpp new file mode 100644 index 0000000..3d50ef4 --- /dev/null +++ b/frameworks/native/device_registrar.cpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
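Both RegisterDevice() and DiscoverHDIDevices() above key m_devices on the hash of the name assembled by GenUniqueName(), so the deviceId handed out through GetAllDeviceId() is std::hash<std::string> applied to "<deviceName>_<vendorName>". A standalone sketch with placeholder names:

#include <functional>
#include <iostream>
#include <string>

int main()
{
    // Placeholder names; the real values come from GetDeviceName()/GetVendorName() over HDI.
    std::string uniqueName = std::string("example_device") + "_" + "example_vendor";
    size_t deviceId = std::hash<std::string>{}(uniqueName);
    std::cout << uniqueName << " -> deviceId=" << deviceId << std::endl;
    return 0;
}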
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "interfaces/oem/cpp_api/device_registrar.h" + +#include "device_manager.h" +#include "common/log.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +DeviceRegistrar::DeviceRegistrar(const CreateDevice creator) +{ + auto& deviceManager = DeviceManager::GetInstance(); + auto ret = deviceManager.RegisterDevice(creator); + if (ret != OH_NN_SUCCESS) { + LOGW("Register device failed. ErrorCode=%d", ret); + } +} +} // NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/execution_plan.cpp b/frameworks/native/execution_plan.cpp new file mode 100644 index 0000000..e9a7c72 --- /dev/null +++ b/frameworks/native/execution_plan.cpp @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "execution_plan.h" + +#include + +#include "common/log.h" +#include "interfaces/oem/cpp_api/cpp_type.h" + + +namespace OHOS { +namespace NeuralNetworkRuntime { +OH_NN_ReturnCode ExecutionPlan::Run(const std::vector>& inputTensors, + std::vector>& outputTensors) +{ + OH_NN_ReturnCode ret {OH_NN_FAILED}; + IOTensor tensor; + std::vector inputIOTensors; + size_t inputSize = inputTensors.size(); + size_t outputSize = outputTensors.size(); + for (size_t i = 0; i < inputSize; ++i) { + inputTensors[i]->ConvertToIOTensor(tensor); + inputIOTensors.emplace_back(std::move(tensor)); + } + + std::vector outputIOTensors; + for (size_t i = 0; i < outputSize; ++i) { + outputTensors[i]->ConvertToIOTensor(tensor); + outputIOTensors.emplace_back(std::move(tensor)); + } + + std::vector> outputsDims; + std::vector isSufficientDataBuffer; + ret = m_preparedModel->Run(inputIOTensors, outputIOTensors, outputsDims, isSufficientDataBuffer); + if (ret != OH_NN_SUCCESS) { + LOGE("PrepardModel Run() failed."); + return ret; + } + + // Check if the output buffer is sufficient + bool bufferFailed {false}; + for (size_t i = 0; i < outputSize; ++i) { + if (!isSufficientDataBuffer[i]) { + // Print all output indices with insufficient buffer, don't return until traversing all outputs. + LOGE("Run failed, Output %zu does not have enough buffer to store the data.", i); + bufferFailed = true; + } + } + if (bufferFailed) { + return OH_NN_FAILED; + } + + // Set the output NNTensor's dimensions from output IOTensor if it is dynamic. + // NNTensor::SetDimensions will check if the tensor buffer is enough for the new dimensions. 
+ for (size_t i = 0; i < outputSize; ++i) { + ret = outputTensors[i]->SetDimensions(outputsDims[i]); + if (ret != OH_NN_SUCCESS) { + LOGE("Run failed, error happened when setting output tensor's dimensions, output id: %zu.", i); + return ret; + } + } + + return OH_NN_SUCCESS; +} + + +std::shared_ptr ExecutionPlan::GetInputDevice() const +{ + return m_device; +} + + +std::shared_ptr ExecutionPlan::GetOutputDevice() const +{ + return m_device; +} +} // NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/execution_plan.h b/frameworks/native/execution_plan.h new file mode 100644 index 0000000..6a6b254 --- /dev/null +++ b/frameworks/native/execution_plan.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_EXECUTION_PLAN_H +#define NEURAL_NETWORK_RUNTIME_EXECUTION_PLAN_H + +#include "frameworks/native/nn_tensor.h" +#include "interfaces/kits/c/neural_network_runtime_type.h" +#include "interfaces/oem/cpp_api/prepared_model.h" +#include "interfaces/oem/cpp_api/device.h" + + +namespace OHOS { +namespace NeuralNetworkRuntime { +class ExecutionPlan { +public: + ExecutionPlan(std::shared_ptr preparedModel, std::shared_ptr device) + : m_preparedModel(preparedModel), + m_device(device) {}; + + OH_NN_ReturnCode Run(const std::vector>& inputTensors, + std::vector>& outputTensors); + + std::shared_ptr GetInputDevice() const; + std::shared_ptr GetOutputDevice() const; + +private: + std::shared_ptr m_preparedModel {nullptr}; + std::shared_ptr m_device {nullptr}; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif \ No newline at end of file diff --git a/frameworks/native/executor.cpp b/frameworks/native/executor.cpp new file mode 100644 index 0000000..f99d28c --- /dev/null +++ b/frameworks/native/executor.cpp @@ -0,0 +1,555 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "executor.h" + +#include "securec.h" + +#include "common/utils.h" +#include "common/scoped_trace.h" + + +namespace OHOS { +namespace NeuralNetworkRuntime { +Executor::Executor(const Compilation* compilation) + : m_modelInputs(compilation->GetInputTensors()), + m_modelOutputs(compilation->GetOutputTensors()), + m_executionPlan(compilation->GetExecutionPlan()) {} + +OH_NN_ReturnCode Executor::BuildInputTensor(uint32_t index, const OH_NN_Tensor& nnTensor, + std::shared_ptr inputTensor) const +{ + // Note: inputs have only shapes info. + if (index >= m_modelInputs.size()) { + LOGE("BuildInputTensor failed, input index is out of range."); + return OH_NN_INVALID_PARAMETER; + } + + // Build a tensor from nnTensor. + auto ret = inputTensor->BuildFromOHNNTensor(nnTensor); + if (ret != OH_NN_SUCCESS) { + LOGE("BuildInputTensor failed, please check input nnTensor."); + return ret; + } + + if (inputTensor->IsDynamicShape()) { + LOGE("BuildInputTensor failed, input nnTensor should has certain dimensions which cannot contain -1."); + return OH_NN_INVALID_PARAMETER; + } + + if (!m_modelInputs[index]->CompareAttribute(*inputTensor)) { + LOGE("BuildInputTensor failed, input has different attributes from the one in the constructed model."); + return OH_NN_INVALID_PARAMETER; + } + + inputTensor->SetName(m_modelInputs[index]->GetName()); + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::SetInputTensorWithCurrentBuffer(uint32_t index, + std::shared_ptr inputTensor, + const void* buffer, + size_t dataLength, + size_t curBufferLength) +{ + void* curBuffer = m_inputTensors[index].tensor->GetBuffer(); + errno_t status = memcpy_s(curBuffer, dataLength, buffer, dataLength); + // Current buffer inside m_inputTensors is managed by executor, no need to release if memcpy failed. + if (status != EOK) { + LOGE("SetInputTensorWithCurrentBuffe failed, copy data from user buffer to device buffer failed. " + "Error code: %d.", status); + return OH_NN_MEMORY_ERROR; + } + + // Set the new tensor with the buffer of current tensor + inputTensor->SetBuffer(curBuffer, curBufferLength); + + // The memory is reused here. Thus, current tensor's buffer must set to nullptr, in case the memory is released + // twice. + m_inputTensors[index].tensor->SetBuffer(nullptr, 0); + + // Set to the new tensor, and release current one. + m_inputTensors[index].tensor = inputTensor; + return OH_NN_SUCCESS; +} + + +void Executor::SetInputTensorWithNewBuffer(uint32_t index, + std::shared_ptr inputTensor, + const void* inputBuffer, + size_t length, + bool isInnerMem) +{ + // Release the memory inside the tensor first, if it is allocated by Executor during SetInput(). + if (m_inputTensors.find(index) != m_inputTensors.end()) { + if (m_inputTensors[index].isInnerMem) { + void* curBuffer = m_inputTensors[index].tensor->GetBuffer(); + std::shared_ptr inputDevice = m_executionPlan->GetInputDevice(); + inputDevice->ReleaseBuffer(curBuffer); + } + // Set current tensor's buffer to nullptr in case the NNTensor release the driver memory in destruction. 
+ m_inputTensors[index].tensor->SetBuffer(nullptr, 0); + } + + // Set new input tensor data buffer + inputTensor->SetBuffer(inputBuffer, length); + + // Create or update the input tensor + ExeTensor exeTensor{inputTensor, nullptr, 0, isInnerMem}; + m_inputTensors[index] = exeTensor; +} + + +OH_NN_ReturnCode Executor::SetInput(uint32_t index, const OH_NN_Tensor& nnTensor, const void* buffer, size_t length) +{ + std::shared_ptr inputTensor = CreateSharedPtr(); + if (inputTensor == nullptr) { + LOGE("SetInput failed, error happened when creating NNTensor."); + return OH_NN_MEMORY_ERROR; + } + + auto ret = BuildInputTensor(index, nnTensor, inputTensor); + if (ret != OH_NN_SUCCESS) { + LOGE("SetInput failed, please check input index or nnTensor."); + return ret; + } + + // dataLength will be larger than 0 after BuildInputTensor() + size_t dataLength = inputTensor->GetDataLength(); + if (length == 0 || length < dataLength) { + LOGE("SetInput failed, the given buffer length is too small to store the input nnTensor data."); + return OH_NN_INVALID_PARAMETER; + } + + // Get length of current buffer if it is allocate by SetInput() before. + size_t curBufferLength = 0; + if ((m_inputTensors.find(index) != m_inputTensors.end()) && (m_inputTensors[index].isInnerMem)) { + curBufferLength = m_inputTensors[index].tensor->GetBufferLength(); + } + + // (dataLength <= curBufferLength) returns true if and only if current buffer is allocated by SetInput() before + // and is larger than user buffer. + if (dataLength <= curBufferLength) { + ret = SetInputTensorWithCurrentBuffer(index, inputTensor, buffer, dataLength, curBufferLength); + if (ret != OH_NN_SUCCESS) { + LOGE("SetInput failed, error happened when setting input with current buffer."); + return ret; + } + m_isRun = false; + return OH_NN_SUCCESS; + } + + /** + * Buffer needs to allocated or reallocated if: + * + * - Current buffer is not enough. + * - SetInput() has not been called for the input before. + * - The buffer held in m_inputTensors is allocated and set by CreateInputMemory() and SetInputFromMemory(). + */ + std::shared_ptr inputDevice = m_executionPlan->GetInputDevice(); + void* inputBuffer = inputDevice->AllocateBuffer(length); + if (inputBuffer == nullptr) { + LOGE("SetInput failed, error happened when allocating input device buffer."); + return OH_NN_MEMORY_ERROR; + } + + errno_t status = memcpy_s(inputBuffer, dataLength, buffer, dataLength); + if (status != EOK) { + LOGE("SetInput failed, copy data from user buffer failed. 
Error code: %d.", status); + inputDevice->ReleaseBuffer(inputBuffer); + return OH_NN_MEMORY_ERROR; + } + + SetInputTensorWithNewBuffer(index, inputTensor, inputBuffer, length, true); + m_isRun = false; + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::SetInputFromMemory(uint32_t index, const OH_NN_Tensor& nnTensor, const OH_NN_Memory& memory) +{ + // Build a input tensor + std::shared_ptr inputTensor = CreateSharedPtr(); + if (inputTensor == nullptr) { + LOGE("SetInputFromMemory failed, error happened when creating NNTensor."); + return OH_NN_MEMORY_ERROR; + } + + auto ret = BuildInputTensor(index, nnTensor, inputTensor); + if (ret != OH_NN_SUCCESS) { + LOGE("SetInputFromMemory failed, please check input index or nnTensor"); + return ret; + } + + // check data length + size_t dataLength = inputTensor->GetDataLength(); + if (memory.length == 0 || memory.length < dataLength) { + LOGE("SetInputFromMemory failed," + " the length in the given memory is too small to store the input nnTensor data."); + return OH_NN_INVALID_PARAMETER; + } + + SetInputTensorWithNewBuffer(index, inputTensor, const_cast(memory.data), memory.length, false); + m_isRun = false; + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::SetOutput(uint32_t index, void* buffer, size_t length) +{ + if (index >= m_modelOutputs.size()) { + LOGE("SetOutput failed, output index is out of range."); + return OH_NN_INVALID_PARAMETER; + } + + size_t dataLength = m_modelOutputs[index]->GetDataLength(); + if (length == 0 || length < dataLength) { + LOGE("SetOutput failed, the given buffer length is too small to store the output tensor data."); + return OH_NN_INVALID_PARAMETER; + } + + // If output tensor does not exist, or inner device buffer size is not enough, + // or device buffer is set by SetOutputFromMemory() before, + // allocate a new device buffer and set it to output tensor, and update the user buffer. + std::shared_ptr outputDevice = m_executionPlan->GetOutputDevice(); + if (m_outputTensors.find(index) != m_outputTensors.end()) { + if (m_outputTensors[index].isInnerMem) { + size_t curBufferLength = m_outputTensors[index].tensor->GetBufferLength(); + if (length <= curBufferLength) { + // If current device buffer size is enough, only update the user buffer. + m_outputTensors[index].userBuffer = buffer; + m_outputTensors[index].userBufferLength = length; + m_isRun = false; + return OH_NN_SUCCESS; + } else { + // If current device buffer size is not enough, + // release current device buffer and then allocate a new one below. + void* curBuffer = m_outputTensors[index].tensor->GetBuffer(); + outputDevice->ReleaseBuffer(curBuffer); + } + } + } else { + // If output tensor does not exist, create a new null output tensor. 
+ ExeTensor exeTensor; + m_outputTensors[index] = exeTensor; + m_outputTensors[index].tensor = m_modelOutputs[index]; + } + + void* deviceOutputBuffer = outputDevice->AllocateBuffer(length); + if (deviceOutputBuffer == nullptr) { + LOGE("SetOutput failed, allocating output device buffer failed."); + return OH_NN_MEMORY_ERROR; + } + + m_outputTensors[index].tensor->SetBuffer(deviceOutputBuffer, length); + m_outputTensors[index].userBuffer = buffer; + m_outputTensors[index].userBufferLength = length; + m_outputTensors[index].isInnerMem = true; + m_isRun = false; + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::SetOutputFromMemory(uint32_t index, const OH_NN_Memory& memory) +{ + if (index >= m_modelOutputs.size()) { + LOGE("SetOutputFromMemory failed, output index is out of range."); + return OH_NN_INVALID_PARAMETER; + } + + size_t dataLength = m_modelOutputs[index]->GetDataLength(); + if (memory.length == 0 || memory.length < dataLength) { + LOGE("SetOutputFromMemory failed, the memory is too small to store the output tensor data."); + return OH_NN_INVALID_PARAMETER; + } + + if (m_outputTensors.find(index) != m_outputTensors.end()) { + if (m_outputTensors[index].isInnerMem) { + // If it is inner buffer, releate it + void* curBuffer = m_outputTensors[index].tensor->GetBuffer(); + std::shared_ptr outputDevice = m_executionPlan->GetOutputDevice(); + outputDevice->ReleaseBuffer(curBuffer); + } + } else { + // If output tensor does not exist, create a new null output tensor. + ExeTensor exeTensor; + m_outputTensors[index] = exeTensor; + m_outputTensors[index].tensor = m_modelOutputs[index]; + } + + // Set the output tensor with memory + m_outputTensors[index].tensor->SetBuffer(const_cast(memory.data), memory.length); + m_outputTensors[index].userBuffer = nullptr; + m_outputTensors[index].userBufferLength = 0; + m_outputTensors[index].isInnerMem = false; + m_isRun = false; + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::GetOutputShape(uint32_t index, int32_t** dimensions, uint32_t& dimensionCount) +{ + if (!m_isRun) { + LOGE("GetOutputShape failed, cannot get output dimensions before Run."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (index >= m_modelOutputs.size()) { + LOGE("GetOutputShape failed, output index is out of range."); + return OH_NN_INVALID_PARAMETER; + } + + if (m_outputTensors.find(index) == m_outputTensors.end()) { + LOGE("GetOutputShape failed, output has not been set. 
Output index: %u.", index); + return OH_NN_INVALID_PARAMETER; + } + + m_outputDimensions[index] = m_outputTensors[index].tensor->GetDimensions(); + *dimensions = m_outputDimensions[index].data(); + dimensionCount = m_outputDimensions[index].size(); + + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::CreateInputMemory(uint32_t index, size_t length, OH_NN_Memory** memory) +{ + if (index >= m_modelInputs.size()) { + LOGE("CreateInputMemory failed, input index is out of range."); + return OH_NN_INVALID_PARAMETER; + } + + // Allocate device buffer + std::shared_ptr inputDevice = m_executionPlan->GetInputDevice(); + void* deviceInputBuffer = inputDevice->AllocateBuffer(length); + if (deviceInputBuffer == nullptr) { + LOGE("CreateInputMemory failed, allocating intput device buffer failed."); + return OH_NN_MEMORY_ERROR; + } + + *memory = new(std::nothrow) OH_NN_Memory{deviceInputBuffer, length}; + if (*memory == nullptr) { + LOGE("CreateInputMemory failed, constructing OH_NN_Memory failed."); + inputDevice->ReleaseBuffer(deviceInputBuffer); + return OH_NN_MEMORY_ERROR; + } + + // Save the buffer address for check when destroying it. + m_inputCreatedMem[index].emplace_back(deviceInputBuffer); + + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::DestroyInputMemory(uint32_t index, OH_NN_Memory** memory) +{ + if (index >= m_modelInputs.size()) { + LOGE("DestroyInputMemory failed, input index is out of range."); + return OH_NN_INVALID_PARAMETER; + } + + if (m_inputCreatedMem.find(index) == m_inputCreatedMem.end()) { + LOGE("DestroyInputMemory failed, the memory has not been created with the index."); + return OH_NN_INVALID_PARAMETER; + } + + std::vector& inputCreatedMem = m_inputCreatedMem[index]; + auto pos = std::find(inputCreatedMem.begin(), inputCreatedMem.end(), (*memory)->data); + if (pos == inputCreatedMem.end()) { + LOGE("DestroyInputMemory failed, the index does not match the memory."); + return OH_NN_INVALID_PARAMETER; + } + + std::shared_ptr inputDevice = m_executionPlan->GetInputDevice(); + auto ret = inputDevice->ReleaseBuffer((*memory)->data); + if (ret != OH_NN_SUCCESS) { + LOGE("Release input buffer failed."); + return ret; + } + + inputCreatedMem.erase(pos); + delete *memory; + *memory = nullptr; + + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::CreateOutputMemory(uint32_t index, size_t length, OH_NN_Memory** memory) +{ + if (index >= m_modelOutputs.size()) { + LOGE("CreateOutputMemory failed, output index is out of range."); + return OH_NN_INVALID_PARAMETER; + } + + // Allocate device buffer + std::shared_ptr outputDevice = m_executionPlan->GetOutputDevice(); + void* deviceOutputBuffer = outputDevice->AllocateBuffer(length); + if (deviceOutputBuffer == nullptr) { + LOGE("CreateOutputMemory failed, allocating output device buffer failed."); + return OH_NN_MEMORY_ERROR; + } + + *memory = new(std::nothrow) OH_NN_Memory{deviceOutputBuffer, length}; + if (*memory == nullptr) { + LOGE("CreateOutputMemory failed, constructing OH_NN_Memory failed."); + outputDevice->ReleaseBuffer(deviceOutputBuffer); + return OH_NN_MEMORY_ERROR; + } + + // Save the buffer address for check when destroying it. 
+ m_outputCreatedMem[index].emplace_back(deviceOutputBuffer); + + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::DestroyOutputMemory(uint32_t index, OH_NN_Memory** memory) +{ + if (index >= m_modelOutputs.size()) { + LOGE("DestroyOutputMemory failed, output index is out of range."); + return OH_NN_INVALID_PARAMETER; + } + + if (m_outputCreatedMem.find(index) == m_outputCreatedMem.end()) { + LOGE("DestroyOutputMemory failed, the memory has not been created with the index."); + return OH_NN_INVALID_PARAMETER; + } + + std::vector& outputCreatedMem = m_outputCreatedMem[index]; + auto pos = std::find(outputCreatedMem.begin(), outputCreatedMem.end(), (*memory)->data); + if (pos == outputCreatedMem.end()) { + LOGE("DestroyOutputMemory failed, the index does not match the memory."); + return OH_NN_INVALID_PARAMETER; + } + + std::shared_ptr outputDevice = m_executionPlan->GetOutputDevice(); + auto ret = outputDevice->ReleaseBuffer((*memory)->data); + if (ret != OH_NN_SUCCESS) { + LOGE("Release output buffer failed."); + return ret; + } + + outputCreatedMem.erase(pos); + delete *memory; + *memory = nullptr; + + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode Executor::Run() +{ + NNRT_TRACE_NAME("Execution"); + if (m_modelInputs.size() != m_inputTensors.size()) { + LOGE("Run failed, some input tensors have not been set."); + return OH_NN_INVALID_PARAMETER; + } + if (m_modelOutputs.size() != m_outputTensors.size()) { + LOGE("Run failed, some output tensors have not been set."); + return OH_NN_INVALID_PARAMETER; + } + + // Build the NNTensor pointer vector: inputTensors and outputTensors + std::vector> inputTensors; + std::vector> outputTensors; + size_t inputSize = m_inputTensors.size(); + size_t outputSize = m_outputTensors.size(); + for (size_t i = 0; i < inputSize; ++i) { + inputTensors.emplace_back(m_inputTensors[i].tensor); + } + for (size_t i = 0; i < outputSize; ++i) { + outputTensors.emplace_back(m_outputTensors[i].tensor); + } + + // Predict + auto ret = m_executionPlan->Run(inputTensors, outputTensors); + if (ret != OH_NN_SUCCESS) { + LOGE("Run failed, error happened when executing the inference."); + return ret; + } + + errno_t status{EOK}; + // Copy inner device buffer to user buffer if using SetOutput() + for (size_t i = 0; i < outputSize; ++i) { + if (m_outputTensors[i].isInnerMem) { + auto size = outputTensors[i]->GetDataLength(); + if (size > m_outputTensors[i].userBufferLength) { + LOGE("Output buffer size is not enough. Your size=%zu, but actual output size=%zu.", + m_outputTensors[i].userBufferLength, size); + return OH_NN_INVALID_PARAMETER; + } + + void* deviceBuffer = outputTensors[i]->GetBuffer(); + if (deviceBuffer == nullptr) { + LOGE("Output buffer is nullptr."); + return OH_NN_FAILED; + } + + status = memcpy_s(m_outputTensors[i].userBuffer, m_outputTensors[i].userBufferLength, deviceBuffer, size); + if (status != EOK) { + LOGE("Run failed, memory copy from device buffer to user buffer failed. 
Error code: %d.", status); + return OH_NN_MEMORY_ERROR; + } + } + } + + m_isRun = true; + return OH_NN_SUCCESS; +} + +Executor::~Executor() +{ + std::shared_ptr inputDevice; + for (auto& it : m_inputTensors) { + inputDevice = m_executionPlan->GetInputDevice(); + if ((it.second).isInnerMem) { + inputDevice->ReleaseBuffer((it.second).tensor->GetBuffer()); + } + (it.second).tensor->SetBuffer(nullptr, 0); + (it.second).tensor.reset(); + (it.second).userBuffer = nullptr; + } + m_inputTensors.clear(); + + std::shared_ptr outputDevice; + for (auto& it : m_outputTensors) { + outputDevice = m_executionPlan->GetOutputDevice(); + if ((it.second).isInnerMem) { + outputDevice->ReleaseBuffer((it.second).tensor->GetBuffer()); + } + (it.second).tensor->SetBuffer(nullptr, 0); + (it.second).tensor.reset(); + (it.second).userBuffer = nullptr; + } + m_outputTensors.clear(); + + for (auto& it : m_inputCreatedMem) { + it.second.clear(); + } + m_inputCreatedMem.clear(); + + for (auto& it : m_outputCreatedMem) { + it.second.clear(); + } + m_outputCreatedMem.clear(); + + m_outputDimensions.clear(); + m_modelInputs.clear(); + m_modelOutputs.clear(); +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/executor.h b/frameworks/native/executor.h new file mode 100644 index 0000000..bbe3d93 --- /dev/null +++ b/frameworks/native/executor.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_EXECUTOR_H +#define NEURAL_NETWORK_RUNTIME_EXECUTOR_H + +#include "compilation.h" +#include "execution_plan.h" +#include "nn_tensor.h" +#include "interfaces/kits/c/neural_network_runtime.h" +#include "interfaces/oem/cpp_api/device.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +class Executor { +public: + explicit Executor(const Compilation* compilation); + ~Executor(); + + OH_NN_ReturnCode SetInput(uint32_t index, const OH_NN_Tensor& nnTensor, const void* buffer, size_t length); + OH_NN_ReturnCode SetInputFromMemory(uint32_t index, const OH_NN_Tensor& nnTensor, const OH_NN_Memory& memory); + OH_NN_ReturnCode SetOutput(uint32_t index, void* buffer, size_t length); + OH_NN_ReturnCode SetOutputFromMemory(uint32_t index, const OH_NN_Memory& memory); + OH_NN_ReturnCode GetOutputShape(uint32_t index, int32_t** dimensions, uint32_t& dimensionCount); + + OH_NN_ReturnCode CreateInputMemory(uint32_t index, size_t length, OH_NN_Memory** memory); + OH_NN_ReturnCode CreateOutputMemory(uint32_t index, size_t length, OH_NN_Memory** memory); + OH_NN_ReturnCode DestroyInputMemory(uint32_t index, OH_NN_Memory** memory); + OH_NN_ReturnCode DestroyOutputMemory(uint32_t index, OH_NN_Memory** memory); + + OH_NN_ReturnCode Run(); + +private: + OH_NN_ReturnCode BuildInputTensor(uint32_t index, const OH_NN_Tensor& nnTensor, + std::shared_ptr inputTensor) const; + OH_NN_ReturnCode SetInputTensorWithCurrentBuffer(uint32_t index, std::shared_ptr inputTensor, + const void* buffer, size_t dataLength, size_t curBufferLength); + void SetInputTensorWithNewBuffer(uint32_t index, std::shared_ptr inputTensor, + const void* inputBuffer, size_t length, bool isInnerMem); + +private: + struct ExeTensor { + std::shared_ptr tensor; + void* userBuffer; + size_t userBufferLength; + bool isInnerMem; + }; + bool m_isRun {false}; + std::vector> m_modelInputs; + std::vector> m_modelOutputs; + std::shared_ptr m_executionPlan {nullptr}; + std::unordered_map> m_outputDimensions; + std::unordered_map m_inputTensors; + std::unordered_map m_outputTensors; + std::unordered_map> m_inputCreatedMem; + std::unordered_map> m_outputCreatedMem; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif \ No newline at end of file diff --git a/frameworks/native/hdi_device.cpp b/frameworks/native/hdi_device.cpp new file mode 100644 index 0000000..6c8dd47 --- /dev/null +++ b/frameworks/native/hdi_device.cpp @@ -0,0 +1,331 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "hdi_device.h" + +#include "hdf_base.h" +#include "mindir.h" + +#include "hdi_prepared_model.h" +#include "memory_manager.h" +#include "transform.h" +#include "common/log.h" +#include "common/utils.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +HDIDevice::HDIDevice(OHOS::sptr device) : m_iDevice(device) +{ + device->GetVersion(m_hdiVersion.first, m_hdiVersion.second); +} + +OH_NN_ReturnCode HDIDevice::GetDeviceName(std::string& name) +{ + auto ret = m_iDevice->GetDeviceName(name); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device name failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::GetVendorName(std::string& name) +{ + auto ret = m_iDevice->GetVendorName(name); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device vendor name failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::GetDeviceType(OH_NN_DeviceType& deviceType) +{ + V1_0::DeviceType iDeviceType; + auto ret = m_iDevice->GetDeviceType(iDeviceType); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device type failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + deviceType = HDIToNN::TransHDIDeviceType(iDeviceType); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::GetDeviceStatus(DeviceStatus& status) +{ + V1_0::DeviceStatus iDeviceStatus; + auto ret = m_iDevice->GetDeviceStatus(iDeviceStatus); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device status failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + status = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr model, + std::vector& ops) +{ + if (model == nullptr) { + LOGE("Model is nullptr, cannot query supported operation."); + return OH_NN_NULL_PTR; + } + + V1_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0}; + size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get()); + int32_t hdiRet {0}; + if (tensorSize > 0) { + hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { + LOGE("Allocate tensor buffer error when get supported operation. ErrorCode: %d", hdiRet); + return OH_NN_FAILED; + } + } + + auto iModel = mindspore::lite::MindIR_LiteGraph_To_Model(model.get(), tensorBuffer); + if (iModel == nullptr) { + LOGE("Parse litegraph to hdi model failed."); + ReleaseSharedBuffer(tensorBuffer); + return OH_NN_FAILED; + } + + hdiRet = m_iDevice->GetSupportedOperation(*iModel, ops); + + mindspore::lite::MindIR_Model_Destroy(&iModel); + auto ret = ReleaseSharedBuffer(tensorBuffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Release tensorBuffer failed."); + return OH_NN_FAILED; + } + if (hdiRet != HDF_SUCCESS) { + LOGE("Get supported operation failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsFloat16PrecisionSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsFloat16PrecisionSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query fp16 precision supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsPerformanceModeSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsPerformanceModeSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query performance mode supported failed. 
ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsPrioritySupported(bool& isSupported) +{ + auto ret = m_iDevice->IsPrioritySupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query priority supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsDynamicInputSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsDynamicInputSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query dynamic input supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsModelCacheSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query cache model supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + if (model == nullptr) { + LOGE("Model is nullptr, cannot prepare model."); + return OH_NN_INVALID_PARAMETER; + } + + V1_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0}; + size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get()); + int32_t hdiRet {0}; + if (tensorSize > 0) { + hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { + LOGE("Allocate tensor buffer error when prepare model. ErrorCode: %d", hdiRet); + return OH_NN_FAILED; + } + } + + V1_0::Model* iModel = mindspore::lite::MindIR_LiteGraph_To_Model(model.get(), tensorBuffer); + if (iModel == nullptr) { + LOGE("Parse litegraph to hdi model failed."); + ReleaseSharedBuffer(tensorBuffer); + return OH_NN_FAILED; + } + + V1_0::ModelConfig iModelConfig; + iModelConfig.enableFloat16 = config.enableFloat16; + iModelConfig.mode = NNToHDI::TransPerformanceMode(config.mode); + iModelConfig.priority = NNToHDI::TransPriority(config.priority); + OHOS::sptr iPreparedModel; + + auto preparedRet = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel); + + mindspore::lite::MindIR_Model_Destroy(&iModel); + auto ret = ReleaseSharedBuffer(tensorBuffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Release tensorBuffer failed."); + return OH_NN_FAILED; + } + if (preparedRet != HDF_SUCCESS || iPreparedModel == nullptr) { + LOGE("Prepare model failed. ErrorCode=%d", preparedRet); + return OH_NN_FAILED; + } + + preparedModel = CreateSharedPtr(iPreparedModel); + if (preparedModel == nullptr) { + LOGE("Prepare model failed, because fail to create preparedModel instance."); + return OH_NN_MEMORY_ERROR; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + std::vector iBuffers; + auto memManager = MemoryManager::GetInstance(); + Memory memory; + OH_NN_ReturnCode ret; + size_t modelCacheSize = modelCache.size(); + for (size_t i = 0; i < modelCacheSize; i++) { + ret = memManager->GetMemory(modelCache[i].buffer, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("The %zuth model cache is invalid. 
Please put valid model cache.", i + 1); + return ret; + } + iBuffers.emplace_back(V1_0::SharedBuffer {memory.fd, memory.length, 0, memory.length}); + } + + V1_0::ModelConfig iModelConfig; + iModelConfig.enableFloat16 = config.enableFloat16; + iModelConfig.mode = NNToHDI::TransPerformanceMode(config.mode); + iModelConfig.priority = NNToHDI::TransPriority(config.priority); + + OHOS::sptr iPreparedModel; + auto hdiRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel); + if (hdiRet != HDF_SUCCESS) { + LOGE("Prepare model from cache failed. ErrorCode=%d", hdiRet); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + preparedModel = CreateSharedPtr(iPreparedModel); + if (preparedModel == nullptr) { + LOGE("Prepare model from model cache failed, because fail to create preparedModel instance."); + return OH_NN_MEMORY_ERROR; + } + return OH_NN_SUCCESS; +} + +void* HDIDevice::AllocateBuffer(size_t length) +{ + if (length == 0) { + LOGE("The length param is invalid, length=0"); + return nullptr; + } + + V1_0::SharedBuffer buffer; + auto ret = m_iDevice->AllocateBuffer(length, buffer); + if (ret != HDF_SUCCESS) { + LOGE("Allocate buffer error. ErrorCode: %d", ret); + return nullptr; + } + + auto memManager = MemoryManager::GetInstance(); + auto addr = memManager->MapMemory(buffer.fd, length); + if (addr == nullptr) { + LOGE("Map fd to address failed."); + } + return addr; +} + +OH_NN_ReturnCode HDIDevice::ReleaseBuffer(const void* buffer) +{ + if (buffer == nullptr) { + LOGE("Buffer is nullptr, no need to release."); + return OH_NN_INVALID_PARAMETER; + } + + auto memManager = MemoryManager::GetInstance(); + Memory memory; + auto ret = memManager->GetMemory(buffer, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("Invalid Buffer, it is not NNRt buffer."); + return ret; + } + + V1_0::SharedBuffer hdiBuffer {memory.fd, memory.length, 0, memory.length}; + auto deviceResult = m_iDevice->ReleaseBuffer(hdiBuffer); + if (deviceResult != HDF_SUCCESS) { + LOGE("Device release buffer error. ErrorCode: %d", deviceResult); + return OH_NN_FAILED; + } + + ret = memManager->UnMapMemory(buffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Unmap memory failed."); + return ret; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::ReleaseSharedBuffer(const V1_0::SharedBuffer& buffer) +{ + if (buffer.fd == INVALID_FD) { + LOGI("No need to release. fd=%d", INVALID_FD); + return OH_NN_SUCCESS; + } + + auto ret = m_iDevice->ReleaseBuffer(buffer); + if (ret != HDF_SUCCESS) { + LOGE("Device release buffer error. ErrorCode=%d", ret); + return OH_NN_FAILED; + } + return OH_NN_SUCCESS; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/hdi_device.h b/frameworks/native/hdi_device.h new file mode 100644 index 0000000..ba52530 --- /dev/null +++ b/frameworks/native/hdi_device.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_HDI_DEVICE_H +#define NEURAL_NETWORK_RUNTIME_HDI_DEVICE_H + +#include "refbase.h" +#include "hdi_interfaces.h" + +#include "interfaces/oem/cpp_api/device.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +class HDIDevice : public Device { +public: + explicit HDIDevice(OHOS::sptr device); + + OH_NN_ReturnCode GetDeviceName(std::string& name) override; + OH_NN_ReturnCode GetVendorName(std::string& name) override; + OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override; + OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override; + OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, + std::vector& ops) override; + + OH_NN_ReturnCode IsFloat16PrecisionSupported(bool& isSupported) override; + OH_NN_ReturnCode IsPerformanceModeSupported(bool& isSupported) override; + OH_NN_ReturnCode IsPrioritySupported(bool& isSupported) override; + OH_NN_ReturnCode IsDynamicInputSupported(bool& isSupported) override; + OH_NN_ReturnCode IsModelCacheSupported(bool& isSupported) override; + + OH_NN_ReturnCode PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) override; + OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, + std::shared_ptr& preparedModel) override; + + void* AllocateBuffer(size_t length) override; + OH_NN_ReturnCode ReleaseBuffer(const void* buffer) override; + +private: + OH_NN_ReturnCode ReleaseSharedBuffer(const V1_0::SharedBuffer& buffer); + +private: + // first: major version, second: minor version + std::pair m_hdiVersion; + OHOS::sptr m_iDevice {nullptr}; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_H \ No newline at end of file diff --git a/frameworks/native/hdi_interfaces.h b/frameworks/native/hdi_interfaces.h new file mode 100644 index 0000000..1d3416b --- /dev/null +++ b/frameworks/native/hdi_interfaces.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H +#define NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H + +#include +#include +#include + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace V1_0 = OHOS::HDI::Nnrt::V1_0; +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H \ No newline at end of file diff --git a/frameworks/native/hdi_prepared_model.cpp b/frameworks/native/hdi_prepared_model.cpp new file mode 100644 index 0000000..491aec6 --- /dev/null +++ b/frameworks/native/hdi_prepared_model.cpp @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "hdi_prepared_model.h" + +#include "common/log.h" +#include "memory_manager.h" +#include "transform.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +HDIPreparedModel::HDIPreparedModel(OHOS::sptr hdiPreparedModel) + : m_hdiPreparedModel(hdiPreparedModel) +{ + hdiPreparedModel->GetVersion(m_hdiVersion.first, m_hdiVersion.second); +} + +OH_NN_ReturnCode HDIPreparedModel::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + LOGE("The vector of modelCache should be empty. size=%zu", modelCache.size()); + return OH_NN_INVALID_PARAMETER; + } + + std::vector iBuffers; + auto ret = m_hdiPreparedModel->ExportModelCache(iBuffers); + if (ret != HDF_SUCCESS) { + LOGE("Export model cache failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + auto memManager = MemoryManager::GetInstance(); + for (size_t i = 0; i < iBuffers.size(); i++) { + auto addr = memManager->MapMemory(iBuffers[i].fd, iBuffers[i].bufferSize); + if (addr == nullptr) { + LOGE("Export the %zuth model cache failed, cannot not map fd to address.", i + 1); + return OH_NN_MEMORY_ERROR; + } + ModelBuffer modelbuffer {addr, iBuffers[i].bufferSize}; + modelCache.emplace_back(modelbuffer); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIPreparedModel::Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) +{ + V1_0::IOTensor iTensor; + std::vector iInputTensors; + for (auto& input: inputs) { + iTensor = NNToHDI::TransIOTensor(input); + if (iTensor.data.fd == INVALID_FD) { + LOGE("Transform inputs tensor failed, cannot find data file descriptor."); + return OH_NN_INVALID_PARAMETER; + } + iInputTensors.emplace_back(iTensor); + } + + std::vector iOutputTensors; + for (auto& output: outputs) { + iTensor = NNToHDI::TransIOTensor(output); + if (iTensor.data.fd == INVALID_FD) { + LOGE("Transform outputs tensor failed, cannot find data file descriptor."); + return OH_NN_INVALID_PARAMETER; + } + iOutputTensors.emplace_back(iTensor); + } + + auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims, isOutputBufferEnough); + if (ret != HDF_SUCCESS || outputsDims.empty()) { + LOGE("Run model failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + return OH_NN_SUCCESS; +} +} // namespace NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/hdi_prepared_model.h b/frameworks/native/hdi_prepared_model.h new file mode 100644 index 0000000..538ab05 --- /dev/null +++ b/frameworks/native/hdi_prepared_model.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#ifndef NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H +#define NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H + +#include + +#include "refbase.h" +#include "hdi_interfaces.h" +#include "interfaces/oem/cpp_api/prepared_model.h" +#include "interfaces/oem/cpp_api/cpp_type.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +class HDIPreparedModel : public PreparedModel { +public: + explicit HDIPreparedModel(OHOS::sptr hdiPreparedModel); + + OH_NN_ReturnCode ExportModelCache(std::vector& modelCache) override; + + OH_NN_ReturnCode Run(const std::vector& inputs, + const std::vector& outputs, + std::vector>& outputsDims, + std::vector& isOutputBufferEnough) override; + +private: + // first: major version, second: minor version + std::pair m_hdiVersion; + OHOS::sptr m_hdiPreparedModel {nullptr}; +}; +} // namespace NeuralNetworkRuntime +} // OHOS +#endif // NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H \ No newline at end of file diff --git a/frameworks/native/inner_model.cpp b/frameworks/native/inner_model.cpp new file mode 100644 index 0000000..bcd20c6 --- /dev/null +++ b/frameworks/native/inner_model.cpp @@ -0,0 +1,546 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "inner_model.h" + +#include +#include +#include + +#include "securec.h" + +#include "common/utils.h" +#include "common/scoped_trace.h" +#include "device_manager.h" +#include "hdi_device.h" +#include "validation.h" +#include "ops_builder.h" +#include "ops_registry.h" +#include "transform.h" + +namespace MSLITE = mindspore::lite; + +namespace OHOS { +namespace NeuralNetworkRuntime { +const std::string NNR_MODEL = "NNR_Model"; +const std::string LOADED_NNR_MODEL = "Loaded_NNR_Model"; + +namespace { +class LiteGraphDeleter { +public: + void operator()(MSLITE::LiteGraph* liteGraph) const + { + MindIR_LiteGraph_Destroy(&liteGraph); + } +}; + +std::shared_ptr ConstructNNTensorFromLiteGraphTensor(const MSLITE::TensorPtr msTensor) +{ + MSLITE::DataType msDataType = MSLITE::MindIR_Tensor_GetDataType(msTensor); + OH_NN_DataType dataType = MSToNN::TransformDataType(msDataType); + std::vector msDims = MSLITE::MindIR_Tensor_GetDims(msTensor); + std::vector msQuantParams = MSLITE::MindIR_Tensor_GetQuantParams(msTensor); + std::vector nnQuantParams = MSToNN::TransformQuantParams(msQuantParams); + + std::shared_ptr nnTensor = CreateSharedPtr(); + if (nnTensor == nullptr) { + LOGE("ConstructNNTensorFromLiteGraphTensor failed, error happened when creating NNTensor."); + return nullptr; + } + + OH_NN_ReturnCode ret = nnTensor->Build(dataType, msDims, nnQuantParams, OH_NN_TENSOR); + if (ret != OH_NN_SUCCESS) { + LOGE("ConstructNNTensorFromLiteGraphTensor failed, error happened when building NNTensor with attributes."); + return nullptr; + } + + return nnTensor; +} + +OH_NN_ReturnCode ConstructNNTensorsFromLiteGraph(const MSLITE::LiteGraph* liteGraph, + const std::vector& indices, + std::vector>& nnTensors) +{ + if (indices.empty()) { + LOGE("ConstructNNTensorsFromLiteGraph failed, passed empty indices list."); + return OH_NN_INVALID_PARAMETER; + } + + uint32_t maximumIndex = *(std::max_element(indices.begin(), indices.end())); + if (maximumIndex >= liteGraph->all_tensors_.size()) { + LOGE("ConstructNNTensorsFromLiteGraph failed, index exceed size of all_tensors inside liteGraph."); + return OH_NN_INVALID_PARAMETER; + } + + std::shared_ptr nnTensor; + for (uint32_t i : indices) { + nnTensor = ConstructNNTensorFromLiteGraphTensor(liteGraph->all_tensors_[i]); + if (nnTensor == nullptr) { + LOGE("ConstructNNTensorsFromLiteGraph failed, failed to construct NNTensor from LiteGraphTensor."); + return OH_NN_NULL_PTR; + } + + nnTensors.emplace_back(nnTensor); + } + + return OH_NN_SUCCESS; +} +} // anonymous namespace + +InnerModel::InnerModel() {} + +bool InnerModel::IsBuild() const +{ + return (m_liteGraph != nullptr); +} + +OH_NN_ReturnCode InnerModel::BuildFromLiteGraph(const MSLITE::LiteGraph* liteGraph) +{ + NNRT_TRACE_NAME("Build model from lite graph"); + if (liteGraph == nullptr) { + LOGE("BuildFromLiteGraph failed, passed empty liteGraph."); + return OH_NN_INVALID_PARAMETER; + } + + if (m_liteGraph != nullptr) { + LOGE("BuildFromLiteGraph failed, liteGraph has been built or loaded before."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (!m_allTensors.empty() || !m_ops.empty()) { + LOGE("BuildFromLiteGraph failed, please LoadLiteGraph without adding tensor and operations."); + return OH_NN_OPERATION_FORBIDDEN; + } + + m_inputTensors.clear(); + OH_NN_ReturnCode ret = ConstructNNTensorsFromLiteGraph(liteGraph, liteGraph->input_indices_, m_inputTensors); + if (ret != OH_NN_SUCCESS) { + LOGE("BuildFromLiteGraph failed, error happened when constructing input NNTensors from liteGraph."); + return 
ret;
+    }
+
+    m_outputTensors.clear();
+    ret = ConstructNNTensorsFromLiteGraph(liteGraph, liteGraph->output_indices_, m_outputTensors);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("BuildFromLiteGraph failed, error happened when constructing output NNTensors from liteGraph.");
+        return ret;
+    }
+
+    m_liteGraph.reset(const_cast<MSLITE::LiteGraph*>(liteGraph), LiteGraphDeleter());
+    m_liteGraph->name_ = LOADED_NNR_MODEL;
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode InnerModel::AddTensor(const OH_NN_Tensor& nnTensor)
+{
+    if (m_liteGraph != nullptr) {
+        LOGE("AddTensor failed, AddTensor is forbidden after Finish() or LoadLiteGraph() has been called.");
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
+
+    std::shared_ptr<NNTensor> tensor = CreateSharedPtr<NNTensor>();
+    if (tensor == nullptr) {
+        LOGE("AddTensor failed, error happened when creating NNTensor.");
+        return OH_NN_MEMORY_ERROR;
+    }
+
+    OH_NN_ReturnCode ret = tensor->BuildFromOHNNTensor(nnTensor);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("AddTensor failed, error happened when building NNTensor from OH_NN_Tensor.");
+        return ret;
+    }
+
+    // The NNTensor is named "Tensor: <index in m_allTensors>".
+    tensor->SetName("Tensor: " + std::to_string(m_allTensors.size()));
+    m_allTensors.emplace_back(tensor);
+
+    return OH_NN_SUCCESS;
+}
+
+// TODO: reduce the cyclomatic complexity of this function.
+OH_NN_ReturnCode InnerModel::SetTensorValue(uint32_t index, const void* buffer, size_t length)
+{
+    if (m_liteGraph != nullptr) {
+        LOGE("SetTensorValue failed, SetTensorValue is forbidden after Finish() or LoadLiteGraph() has been called.");
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
+
+    if (index >= m_allTensors.size()) {
+        LOGE("SetTensorValue failed, passed index %u exceeds the number of added tensors.", index);
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    const std::shared_ptr<NNTensor> tensor = m_allTensors[index];
+    if (tensor->GetBuffer() != nullptr) {
+        LOGE("SetTensorValue failed, the tensor value has already been set. Tensor index: %u.", index);
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (buffer == nullptr) {
+        LOGW("SetTensorValue passed a null buffer, which has no effect.");
+        return OH_NN_SUCCESS;
+    }
+
+    if (tensor->IsDynamicShape()) {
+        LOGE("SetTensorValue failed, cannot set value to a tensor with dynamic shape.");
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
+
+    if (length != tensor->GetDataLength()) {
+        LOGE("SetTensorValue failed, buffer length %zu does not match the byte size %zu of the tensor.",
+             length, tensor->GetDataLength());
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    // Data will be released inside NNTensor if it is set inside NNTensor using SetBuffer().
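+    // Copy the user data into a heap buffer owned by this NNTensor; per the note above, SetBuffer()
+    // transfers ownership, so the allocation is freed by the NNTensor rather than by the caller.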
+ void* data = new (std::nothrow) char[length]; + if (data == nullptr) { + LOGE("SetTensorValue failed, please check whether it runs out of memory."); + return OH_NN_MEMORY_ERROR; + } + + errno_t ret = memcpy_s(data, length, buffer, length); + if (ret != EOK) { + LOGE("SetTensorValue failed, please the information of error number %d from memcpy_s.", ret); + delete [] reinterpret_cast(data); + return OH_NN_FAILED; + } + + tensor->SetBuffer(data, length); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode InnerModel::ValidateInputAndOutput( + const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices) const +{ + OH_NN_ReturnCode ret = ValidateTensorArray(inputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("ValidateInputAndOutput failed, please check input indices."); + return ret; + } + + ret = ValidateTensorArray(outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("ValidateInputAndOutput failed, please check output indices."); + return ret; + } + + if (inputIndices.size == 0) { + LOGE("ValidateInputAndOutput failed, passed empty input indices."); + return OH_NN_INVALID_PARAMETER; + } + + if (outputIndices.size == 0) { + LOGE("ValidateInputAndOutput failed, passed empty output indices."); + return OH_NN_INVALID_PARAMETER; + } + + std::shared_ptr tensor{nullptr}; + for (uint32_t i = 0; i < inputIndices.size; i++) { + tensor = m_allTensors[inputIndices.data[i]]; + if (tensor->GetType() != OH_NN_TENSOR) { + LOGE("ValidateInputAndOutput failed, tensor set as input should has type of OH_NN_TENSOR, but receive %d." + "Tensor index: %u.", tensor->GetType(), i); + return OH_NN_INVALID_PARAMETER; + } + } + + for (uint32_t i = 0; i < outputIndices.size; i++) { + tensor = m_allTensors[outputIndices.data[i]]; + if (tensor->GetType() != OH_NN_TENSOR) { + LOGE("ValidateInputAndOutput failed, tensor set as output should has type of OH_NN_TENSOR, but receive %d." + "Tensor index: %u.", tensor->GetType(), i); + return OH_NN_INVALID_PARAMETER; + } + } + + // The number of inputIndices and outputIndices are usually small, so O(n**2) iteration is fine. + for (uint32_t i = 0; i < inputIndices.size; i++) { + for (uint32_t j = 0; j < outputIndices.size; j++) { + if (inputIndices.data[i] == outputIndices.data[j]) { + LOGE("ValidateInputAndOutput failed, should not set an tensor as input and output at the same time, " + "input index %u, output index %u", inputIndices.data[i], outputIndices.data[j]); + return OH_NN_INVALID_PARAMETER; + } + } + } + return OH_NN_SUCCESS; +} + +/* Check whether the indices exceed the number of added tensors. 
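Used by ValidateInputAndOutput() for the input and output indices and by AddOperation() for the parameter indices; any out-of-range index is rejected with OH_NN_INVALID_PARAMETER.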
*/ +OH_NN_ReturnCode InnerModel::ValidateTensorArray(const OH_NN_UInt32Array& indices) const +{ + OH_NN_ReturnCode ret = Validation::ValidateArray(indices.data, indices.size); + if (ret != OH_NN_SUCCESS) { + LOGE("ValidateTensorArray failed, please check the validity of indices."); + return ret; + } + + for (uint32_t i = 0; i < indices.size; i++) { + if (indices.data[i] >= m_allTensors.size()) { + LOGE("ValidateTensors failed, index %u is out of the number of added tensors.", indices.data[i]); + return OH_NN_INVALID_PARAMETER; + } + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode InnerModel::AddOperation(OH_NN_OperationType opType, const OH_NN_UInt32Array& paramIndices, + const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices) +{ + if (m_liteGraph != nullptr) { + LOGE("AddOperation failed, AddOperation is forbidden after after Finish() or LoadLiteGraph() has been called."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode ret = ValidateInputAndOutput(inputIndices, outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("AddOperation failed, please check inputIndices and outputIndices."); + return ret; + } + std::vector inputs = ConstructVectorFromArray(inputIndices.data, inputIndices.size); + std::vector outputs = ConstructVectorFromArray(outputIndices.data, outputIndices.size); + + ret = ValidateTensorArray(paramIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("AddOperation failed, please check paramIndices."); + return ret; + } + std::vector parameters = ConstructVectorFromArray(paramIndices.data, paramIndices.size); + + Ops::OpsRegistry& opsRegistry = Ops::OpsRegistry::GetSingleton(); + std::unique_ptr opsBuilder = opsRegistry.GetOpsBuilder(opType); + if (opsBuilder == nullptr) { + LOGE("AddOperation failed, cannot add operation of type: %d.", opType); + return OH_NN_INVALID_PARAMETER; + } + + ret = opsBuilder->Build(parameters, inputs, outputs, m_allTensors); + if (ret != OH_NN_SUCCESS) { + LOGE("AddOperation failed, error happens when build operations."); + return ret; + } + + m_ops.emplace_back(std::move(opsBuilder)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode InnerModel::SpecifyInputsAndOutputs( + const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices) +{ + if (m_liteGraph != nullptr) { + LOGE("SpecifyInputsAndOutputs failed, " + "SpecifyInputsAndOutputs is forbidden after Finish() or LoadLiteGraph() has been called."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (!m_inputTensors.empty()) { + LOGE("SpecifyInputsAndOutputs failed, SpecifyInputsAndOutputs should not be called twice."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode ret = ValidateInputAndOutput(inputIndices, outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("SpecifyInputsAndOutputs failed, please check inputIndices and outputIndices."); + return ret; + } + + m_inputIndices = ConstructVectorFromArray(inputIndices.data, inputIndices.size); + m_outputIndices = ConstructVectorFromArray(outputIndices.data, outputIndices.size); + + for (uint32_t i : m_inputIndices) { + m_inputTensors.emplace_back(m_allTensors[i]); + } + + for (uint32_t i : m_outputIndices) { + m_outputTensors.emplace_back(m_allTensors[i]); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode InnerModel::Build() +{ + NNRT_TRACE_NAME("Build model"); + if (m_liteGraph != nullptr) { + LOGE("Build failed," + " OH_NNModel is not allowed to build again after Build() or BuildFromLiteGraph() has been called."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (m_allTensors.empty()) { + 
LOGE("Build failed, no OH_NN_Tensor has been added. Must call AddTensor before Build()."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (m_ops.empty()) { + LOGE("Build failed, no operation has beed added. Must call AddOperation before Build()."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if ((m_inputIndices.empty()) || (m_outputIndices.empty())) { + LOGE("Build failed, inputs and outputs are unspecified. Must call SpecifyInputsAndOutputs before Build()."); + return OH_NN_OPERATION_FORBIDDEN; + } + + MSLITE::LiteGraph* pLiteGraph = new (std::nothrow) MSLITE::LiteGraph(); + if (pLiteGraph == nullptr) { + LOGE("Build failed, error happend when creating LiteGraph."); + return OH_NN_MEMORY_ERROR; + } + m_liteGraph.reset(pLiteGraph, LiteGraphDeleter()); + + m_liteGraph->name_ = NNR_MODEL; + + std::unordered_map modelIDToGraphID; + AddTensorsToLiteGraph(modelIDToGraphID); + + OH_NN_ReturnCode ret = AddNodesToLiteGraph(modelIDToGraphID); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + // subGraph will be released by LiteGraph if it is added into instance of LiteGraph. + MSLITE::LiteGraph::SubGraph* subGraph = new (std::nothrow) MSLITE::LiteGraph::SubGraph(); + if (subGraph == nullptr) { + LOGE("AddNodesToLiteGraph failed, error happened when creating subgraph."); + return OH_NN_NULL_PTR; + } + + subGraph->name_ = "NNRt_SubGraph"; // Name of subGraph + subGraph->input_indices_ = m_liteGraph->input_indices_; + subGraph->output_indices_ = m_liteGraph->output_indices_; + uint32_t nodeCount = static_cast(m_ops.size()); // m_ops.size() smaller than UINT32_MAX + for (uint32_t i = 0; i < nodeCount; i++) { + subGraph->node_indices_.emplace_back(i); + } + m_liteGraph->sub_graphs_.emplace_back(subGraph); + + return OH_NN_SUCCESS; +} + +void InnerModel::AddTensorsToLiteGraph(std::unordered_map& modelIDToGraphID) +{ + uint32_t graphID = 0; + LiteGraphTensorPtr tensor(nullptr, DestroyLiteGraphTensor); + size_t tensorCount = m_allTensors.size(); + for (size_t i = 0; i < tensorCount; i++) { + const std::shared_ptr& nnTensor = m_allTensors[i]; + // If the tensor is used as operation parameter, it will not convert to the tensor of LiteGraph. + if (nnTensor->IsOpParameter()) { + continue; + } + + tensor = nnTensor->ConvertToLiteGraphTensor(); + m_liteGraph->all_tensors_.emplace_back(tensor.release()); + modelIDToGraphID[i] = graphID++; + } + + // Note: Indices in m_inputIndices and m_outputIndices have been checked in SpecifyInputAndOutput(), there is no + // need to check twice. + std::vector& inputIndices = m_liteGraph->input_indices_; + for (uint32_t index : m_inputIndices) { + inputIndices.emplace_back(modelIDToGraphID.at(index)); + } + + std::vector& outputIndices = m_liteGraph->output_indices_; + for (uint32_t index : m_outputIndices) { + outputIndices.emplace_back(modelIDToGraphID.at(index)); + } +} + +OH_NN_ReturnCode InnerModel::AddNodesToLiteGraph(const std::unordered_map& modelIDToGraphID) +{ + MSLITE::LiteGraph::Node* node{nullptr}; + size_t opCount = m_ops.size(); + Ops::LiteGraphPrimitvePtr primitive = {nullptr, DestroyLiteGraphTensor}; + for (size_t i = 0; i < opCount; i++) { + std::unique_ptr& op = m_ops[i]; + // node will be released by LiteGraph if it is added into instance of LiteGraph. 
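+        // One LiteGraph node is created per OpsBuilder: the node name is "<op name>:<op index>", the
+        // input/output indices are remapped from model tensor IDs to LiteGraph tensor IDs, and the
+        // primitive returned by GetPrimitive() is released into the node, which then owns it.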
+ node = new(std::nothrow) MSLITE::LiteGraph::Node(); + if (node == nullptr) { + LOGE("AddNodesToLiteGraph failed, error happened when creating LiteGraph tensor."); + return OH_NN_NULL_PTR; + } + + node->name_ = op->GetName() + ":" + std::to_string(i); + node->quant_type_ = NNToMS::TransformQuantType(op->GetQuantType()); + + op->GetInputIndex(node->input_indices_, modelIDToGraphID); + op->GetOutputIndex(node->output_indices_, modelIDToGraphID); + + primitive = op->GetPrimitive(); + if (primitive == nullptr) { + LOGE("Build %s primitive failed.", op->GetName().c_str()); + delete node; + return OH_NN_FAILED; + } + + node->primitive_ = primitive.release(); + m_liteGraph->all_nodes_.emplace_back(node); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode InnerModel::GetSupportedOperations(size_t deviceID, const bool** isSupported, uint32_t& opCount) +{ + if (m_liteGraph == nullptr) { + LOGE("GetSupportedOperations failed. GetSupportedOperations() must be called after Finish()."); + return OH_NN_OPERATION_FORBIDDEN; + } + + DeviceManager& deviceManager = DeviceManager::GetInstance(); + + std::shared_ptr device = deviceManager.GetDevice(deviceID); + if (device == nullptr) { + LOGE("GetSupportedOperations failed, retrieve device failed."); + return OH_NN_FAILED; + } + + std::vector supportedOperations; + OH_NN_ReturnCode ret = device->GetSupportedOperation(m_liteGraph, supportedOperations); + if (ret != OH_NN_SUCCESS) { + LOGE("GetSupportedOperations failed, error happened when get supported operations from devices."); + return ret; + } + + m_supportedOperations.clear(); + std::copy(supportedOperations.begin(), supportedOperations.end(), std::back_inserter(m_supportedOperations)); + + *isSupported = reinterpret_cast(m_supportedOperations.data()); + opCount = m_supportedOperations.size(); + + return OH_NN_SUCCESS; +} + +std::shared_ptr InnerModel::GetLiteGraphs() const +{ + return m_liteGraph; +} + +std::vector> InnerModel::GetInputTensors() const +{ + return m_inputTensors; +} + +std::vector> InnerModel::GetOutputTensors() const +{ + return m_outputTensors; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/inner_model.h b/frameworks/native/inner_model.h new file mode 100644 index 0000000..6a4460d --- /dev/null +++ b/frameworks/native/inner_model.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_INNER_MODEL_H +#define NEURAL_NETWORK_RUNTIME_INNER_MODEL_H + +#include +#include + +#include "mindir.h" +#include "ops_builder.h" +#include "interfaces/kits/c/neural_network_runtime.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +class InnerModel { +public: + InnerModel(); + + bool IsBuild() const; + OH_NN_ReturnCode BuildFromLiteGraph(const mindspore::lite::LiteGraph* liteGraph); + OH_NN_ReturnCode AddTensor(const OH_NN_Tensor& nnTensor); + OH_NN_ReturnCode SetTensorValue(uint32_t index, const void* buffer, size_t length); + OH_NN_ReturnCode AddOperation(OH_NN_OperationType opType, + const OH_NN_UInt32Array& paramIndices, + const OH_NN_UInt32Array& inputIndices, + const OH_NN_UInt32Array& outputIndices); + OH_NN_ReturnCode GetSupportedOperations(size_t deviceID, const bool** isSupported, uint32_t& opCount); + OH_NN_ReturnCode SpecifyInputsAndOutputs( + const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices); + OH_NN_ReturnCode Build(); + std::vector> GetInputTensors() const; + std::vector> GetOutputTensors() const; + std::shared_ptr GetLiteGraphs() const; + +private: + void AddTensorsToLiteGraph(std::unordered_map& modelIDToGraphID); + OH_NN_ReturnCode AddNodesToLiteGraph(const std::unordered_map& modelIDToGraphID); + OH_NN_ReturnCode ValidateInputAndOutput( + const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices) const; + OH_NN_ReturnCode ValidateTensorArray(const OH_NN_UInt32Array& indices) const; + +private: + std::vector m_supportedOperations; // std::vector not support data(), use std::vector instead. + std::vector m_inputIndices; + std::vector m_outputIndices; + std::vector> m_ops; + std::vector> m_allTensors; + std::vector> m_inputTensors; // Used to pass input tensors to compilation. + std::vector> m_outputTensors; // Used to pass output tensors to compilation. + std::shared_ptr m_liteGraph {nullptr}; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_INNER_MODEL_H diff --git a/frameworks/native/memory_manager.cpp b/frameworks/native/memory_manager.cpp new file mode 100644 index 0000000..2c87ada --- /dev/null +++ b/frameworks/native/memory_manager.cpp @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#include "memory_manager.h" + +#include +#include + +#include "interfaces/oem/cpp_api/cpp_type.h" +#include "common/log.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +void* MemoryManager::MapMemory(int fd, size_t length) +{ + if (fd < 0) { + LOGE("Invalid fd, fd must greater than 0."); + return nullptr; + } + + if (length <= 0 || length > ALLOCATE_BUFFER_LIMIT) { + LOGE("Invalid buffer size, it must greater than 0 and less than 1Gb. 
length=%zu", length); + return nullptr; + } + + void* addr = mmap(nullptr, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + if (addr == MAP_FAILED) { + LOGE("Map fd to address failed."); + return nullptr; + } + + std::lock_guard lock(m_mtx); + Memory memory {fd, addr, length}; + m_memorys.emplace(addr, memory); + return addr; +} + +OH_NN_ReturnCode MemoryManager::UnMapMemory(const void* buffer) +{ + if (buffer == nullptr) { + LOGE("Buffer is nullptr, no need to release."); + return OH_NN_INVALID_PARAMETER; + } + + auto iter = m_memorys.find(buffer); + if (iter == m_memorys.end()) { + LOGE("This buffer is not found, cannot release."); + return OH_NN_INVALID_PARAMETER; + } + + auto& memory = m_memorys[buffer]; + auto unmapResult = munmap(const_cast(memory.data), memory.length); + if (unmapResult != 0) { + LOGE("Unmap memory failed. Please try again."); + return OH_NN_MEMORY_ERROR; + } + memory.data = nullptr; + + if (close(memory.fd) != 0) { + LOGE("Close memory fd failed. fd=%d", memory.fd); + return OH_NN_MEMORY_ERROR; + } + + std::lock_guard lock(m_mtx); + m_memorys.erase(iter); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode MemoryManager::GetMemory(const void* buffer, Memory& memory) const +{ + if (buffer == nullptr) { + LOGE("Memory is nullptr."); + return OH_NN_NULL_PTR; + } + + auto iter = m_memorys.find(buffer); + if (iter == m_memorys.end()) { + LOGE("Memory is not found."); + return OH_NN_INVALID_PARAMETER; + } + + memory.fd = iter->second.fd; + memory.data = buffer; + memory.length = iter->second.length; + + return OH_NN_SUCCESS; +} +} // NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/memory_manager.h b/frameworks/native/memory_manager.h new file mode 100644 index 0000000..5518deb --- /dev/null +++ b/frameworks/native/memory_manager.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_MEMORY_MANAGER_H +#define NEURAL_NETWORK_RUNTIME_MEMORY_MANAGER_H + +#include +#include + +#include "interfaces/kits/c/neural_network_runtime_type.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +const int INVALID_FD = -1; + +struct Memory { + int fd; + const void* data; + size_t length; +}; + +class MemoryManager { +public: + ~MemoryManager() = default; + + void* MapMemory(int fd, size_t length); + OH_NN_ReturnCode UnMapMemory(const void* buffer); + OH_NN_ReturnCode GetMemory(const void* buffer, Memory& memory) const; + + static MemoryManager* GetInstance() + { + static MemoryManager instance; + return &instance; + } + +private: + MemoryManager() {}; + MemoryManager(const MemoryManager&) = delete; + MemoryManager& operator=(const MemoryManager&) = delete; + +private: + // key: OH_NN_Memory, value: fd + std::unordered_map m_memorys; + std::mutex m_mtx; +}; +} // namespace NeuralNetworkRuntime +} // OHOS +#endif // NEURAL_NETWORK_RUNTIME_MEMORY_MANAGER_H \ No newline at end of file diff --git a/frameworks/native/neural_network_runtime.cpp b/frameworks/native/neural_network_runtime.cpp new file mode 100644 index 0000000..dfd5f36 --- /dev/null +++ b/frameworks/native/neural_network_runtime.cpp @@ -0,0 +1,682 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "interfaces/innerkits/c/neural_network_runtime_inner.h" +#include "interfaces/kits/c/neural_network_runtime.h" + +#include "compilation.h" +#include "device_manager.h" +#include "executor.h" +#include "inner_model.h" +#include "common/log.h" + + +using namespace OHOS::NeuralNetworkRuntime; + +#define NNRT_API __attribute__((visibility("default"))) + +NNRT_API OH_NNModel *OH_NNModel_Construct(void) +{ + InnerModel *innerModel = new(std::nothrow) InnerModel(); + if (innerModel == nullptr) { + LOGE("OH_NNModel_Construct failed, please check whether it has enough memory."); + return nullptr; + } + + OH_NNModel *nnModel = reinterpret_cast(innerModel); + return nnModel; +} + +NNRT_API OH_NN_ReturnCode OH_NNModel_AddTensor(OH_NNModel *model, const OH_NN_Tensor *tensor) +{ + if (model == nullptr) { + LOGE("OH_NNModel_AddTensor failed, passed nullptr to model."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor == nullptr) { + LOGE("OH_NNModel_AddTensor failed, passed nullptr to tensor."); + return OH_NN_INVALID_PARAMETER; + } + + InnerModel *innerModel = reinterpret_cast(model); + return innerModel->AddTensor(*tensor); +} + +NNRT_API OH_NN_ReturnCode OH_NNModel_AddOperation(OH_NNModel *model, + OH_NN_OperationType op, + const OH_NN_UInt32Array *paramIndices, + const OH_NN_UInt32Array *inputIndices, + const OH_NN_UInt32Array *outputIndices) +{ + if (model == nullptr) { + LOGE("OH_NNModel_AddOperation failed, passed nullptr to model."); + return OH_NN_INVALID_PARAMETER; + } + + if (paramIndices == nullptr) { + LOGE("OH_NNModel_AddOperation failed, passed nullptr to paramIndices."); + return OH_NN_INVALID_PARAMETER; + } + + if (inputIndices == nullptr) { + LOGE("OH_NNModel_AddOperation failed, passed nullptr to inputIndices."); + return OH_NN_INVALID_PARAMETER; + } + + if (outputIndices == nullptr) { + LOGE("OH_NNModel_AddOperation failed, passed nullptr to outputIndices."); + return OH_NN_INVALID_PARAMETER; + } + + InnerModel *innerModel = reinterpret_cast(model); + return innerModel->AddOperation(op, *paramIndices, *inputIndices, *outputIndices); +} + +NNRT_API OH_NN_ReturnCode OH_NNModel_SetTensorData(OH_NNModel *model, + uint32_t index, + const void *dataBuffer, + size_t length) +{ + if (model == nullptr) { + LOGE("OH_NNModel_SetTensorData failed, passed nullptr to model."); + return OH_NN_INVALID_PARAMETER; + } + + if (dataBuffer == nullptr) { + LOGE("OH_NNModel_SetTensorData failed, passed nullptr to dataBuffer, which has no effect."); + return OH_NN_INVALID_PARAMETER; + } + + if (length == 0) { + LOGE("OH_NNModel_SetTensorData failed, passed dataBuffer with length 0, which has no effect."); + return OH_NN_INVALID_PARAMETER; + } + + InnerModel *innerModel = reinterpret_cast(model); + return innerModel->SetTensorValue(index, dataBuffer, length); +} + +NNRT_API OH_NN_ReturnCode OH_NNModel_SpecifyInputsAndOutputs(OH_NNModel *model, + const OH_NN_UInt32Array *inputIndices, + const OH_NN_UInt32Array *outputIndices) +{ + if (model == nullptr) { + LOGE("OH_NNModel_SpecifyInputsAndOutputs failed, passed nullptr to model."); + return OH_NN_INVALID_PARAMETER; + } + + if (inputIndices == nullptr) { + LOGE("OH_NNModel_SpecifyInputsAndOutputs failed, passed nullptr to inputIndices."); + return OH_NN_INVALID_PARAMETER; + } + + if (outputIndices == nullptr) { + LOGE("OH_NNModel_SpecifyInputsAndOutputs failed, passed nullptr to outputIndices."); + return OH_NN_INVALID_PARAMETER; + } + + InnerModel *innerModel = reinterpret_cast(model); + return 
innerModel->SpecifyInputsAndOutputs(*inputIndices, *outputIndices); +} + +NNRT_API OH_NN_ReturnCode OH_NNModel_Finish(OH_NNModel *model) +{ + if (model == nullptr) { + LOGE("OH_NNModel_Finish failed, passed nullptr to model."); + return OH_NN_INVALID_PARAMETER; + } + + InnerModel *innerModel = reinterpret_cast(model); + return innerModel->Build(); +} + +NNRT_API OH_NN_ReturnCode OH_NNModel_BuildFromLiteGraph(OH_NNModel *model, const void *liteGraph) +{ + if (model == nullptr) { + LOGE("OH_NNModel_BuildFromLiteGraph failed, passed nullptr to model."); + return OH_NN_INVALID_PARAMETER; + } + + if (liteGraph == nullptr) { + LOGE("OH_NNModel_BuildFromLiteGraph failed, passed nullptr to liteGraph."); + return OH_NN_INVALID_PARAMETER; + } + + auto *pLiteGraph = static_cast(liteGraph); + InnerModel *innerModel = reinterpret_cast(model); + + // Once the innerModel built from the liteGraph successfully, the innerModel + // owns the liteGraph, in which case, the invoker should not delete + // the liteGraph actively. Otherwise, the invoker still has the ownership. + return innerModel->BuildFromLiteGraph(pLiteGraph); +} + +NNRT_API void OH_NNModel_Destroy(OH_NNModel **model) +{ + if (model == nullptr) { + LOGW("OH_NNModel_Destroy has no effect, passed nullptr to model."); + return; + } + + if (*model == nullptr) { + LOGW("OH_NNModel_Destroy has no effect, passed nullptr to *model."); + return; + } + + InnerModel *innerModel = reinterpret_cast(*model); + delete innerModel; + *model = nullptr; +} + +NNRT_API OH_NN_ReturnCode OH_NNModel_GetAvailableOperations(OH_NNModel *model, + size_t deviceID, + const bool **isAvailable, + uint32_t *opCount) +{ + if (model == nullptr) { + LOGE("OH_NNModel_GetAvailableOperations failed, passed nullptr to model."); + return OH_NN_INVALID_PARAMETER; + } + + if (isAvailable == nullptr) { + LOGE("OH_NNModel_GetAvailableOperations failed, passed nullptr to isAvailable."); + return OH_NN_INVALID_PARAMETER; + } + + if (*isAvailable != nullptr) { + LOGE("OH_NNModel_GetAvailableOperations failed, *isAvailable is not nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + if (opCount == nullptr) { + LOGE("OH_NNModel_GetAvailableOperations failed, passed nullptr to opCount."); + return OH_NN_INVALID_PARAMETER; + } + + InnerModel *innerModel = reinterpret_cast(model); + return innerModel->GetSupportedOperations(deviceID, isAvailable, *opCount); +} + +NNRT_API OH_NNCompilation *OH_NNCompilation_Construct(const OH_NNModel *model) +{ + if (model == nullptr) { + LOGE("OH_NNCompilation_Construct failed, passed nullptr to model."); + return nullptr; + } + const InnerModel *innerModel = reinterpret_cast(model); + + if (!innerModel->IsBuild()) { + LOGE("OH_NNCompilation_Construct failed, should call OH_NNModel_Finish before creating compilation."); + return nullptr; + } + + Compilation *compilation = new(std::nothrow) Compilation(innerModel); + if (compilation == nullptr) { + LOGE("OH_NNCompilation_Construct failed, please check whether it has enough memory."); + return nullptr; + } + + OH_NNCompilation* nnCompilation = reinterpret_cast(compilation); + return nnCompilation; +} + +NNRT_API OH_NN_ReturnCode OH_NNCompilation_SetDevice(OH_NNCompilation *compilation, size_t deviceID) +{ + if (compilation == nullptr) { + LOGE("OH_NNCompilation_SetDevice failed, passed nullptr to compilation."); + return OH_NN_INVALID_PARAMETER; + } + + Compilation* innerCompilation = reinterpret_cast(compilation); + return innerCompilation->SetDevice(deviceID); +} + +NNRT_API OH_NN_ReturnCode 
OH_NNCompilation_SetCache(OH_NNCompilation *compilation, + const char *cachePath, + uint32_t version) +{ + if (compilation == nullptr) { + LOGE("OH_NNCompilation_SetCache failed, passed nullptr to compilation."); + return OH_NN_INVALID_PARAMETER; + } + + if (cachePath == nullptr) { + LOGE("OH_NNCompilation_SetCache failed, passed nullptr to cachePath."); + return OH_NN_INVALID_PARAMETER; + } + + Compilation* innerCompilation = reinterpret_cast(compilation); + return innerCompilation->SetCacheDir(cachePath, version); +} + +NNRT_API OH_NN_ReturnCode OH_NNCompilation_SetPerformanceMode(OH_NNCompilation *compilation, + OH_NN_PerformanceMode performanceMode) +{ + if (compilation == nullptr) { + LOGE("OH_NNCompilation_SetPerformanceMode failed, passed nullptr to compilation."); + return OH_NN_INVALID_PARAMETER; + } + + Compilation* innerCompilation = reinterpret_cast(compilation); + return innerCompilation->SetPerformance(performanceMode); +} + +NNRT_API OH_NN_ReturnCode OH_NNCompilation_SetPriority(OH_NNCompilation *compilation, + OH_NN_Priority priority) +{ + if (compilation == nullptr) { + LOGE("OH_NNCompilation_SetPriority failed, passed nullptr to compilation."); + return OH_NN_INVALID_PARAMETER; + } + + Compilation* innerCompilation = reinterpret_cast(compilation); + return innerCompilation->SetPriority(priority); +} + +NNRT_API OH_NN_ReturnCode OH_NNCompilation_EnableFloat16(OH_NNCompilation *compilation, bool enableFloat16) +{ + if (compilation == nullptr) { + LOGE("OH_NNCompilation_EnableFloat16 failed, passed nullptr to compilation."); + return OH_NN_INVALID_PARAMETER; + } + + Compilation* innerCompilation = reinterpret_cast(compilation); + return innerCompilation->SetEnableFp16(enableFloat16); +} + +NNRT_API OH_NN_ReturnCode OH_NNCompilation_Build(OH_NNCompilation *compilation) +{ + if (compilation == nullptr) { + LOGE("OH_NNCompilation_Build failed, passed nullptr to compilation."); + return OH_NN_INVALID_PARAMETER; + } + + Compilation* innerCompilation = reinterpret_cast(compilation); + return innerCompilation->Build(); +} + +NNRT_API void OH_NNCompilation_Destroy(OH_NNCompilation **compilation) +{ + if (compilation == nullptr) { + LOGW("OH_NNCompilation_Destroy has no effect, passed nullptr to compilation."); + return; + } + + if (*compilation == nullptr) { + LOGW("OH_NNCompilation_Destroy has no effect, passed nullptr to *compilation."); + return; + } + + Compilation *innerCompilation = reinterpret_cast(*compilation); + delete innerCompilation; + *compilation = nullptr; +} + +NNRT_API OH_NNExecutor *OH_NNExecutor_Construct(OH_NNCompilation *compilation) +{ + if (compilation == nullptr) { + LOGE("OH_NNExecutor_Construct failed, passed nullptr to compilation."); + return nullptr; + } + Compilation *innerCompilation = reinterpret_cast(compilation); + + if (!innerCompilation->IsBuild()) { + LOGE("OH_NNExecutor_Construct failed, should call OH_NNCompilation_Build before creating executor."); + return nullptr; + } + + Executor* executor = new(std::nothrow) Executor(innerCompilation); + if (executor == nullptr) { + LOGE("OH_NNExecutor_Construct failed, please check whether it has enough memory."); + return nullptr; + } + + OH_NNExecutor* nnExecutor = reinterpret_cast(executor); + return nnExecutor; +} + +NNRT_API OH_NN_ReturnCode OH_NNExecutor_SetInput(OH_NNExecutor *executor, + uint32_t inputIndex, + const OH_NN_Tensor *tensor, + const void *dataBuffer, + size_t length) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_SetInput failed, passed nullptr to executor."); + return 
OH_NN_INVALID_PARAMETER; + } + + if (tensor == nullptr) { + LOGE("OH_NNExecutor_SetInput failed, passed nullptr to tensor."); + return OH_NN_INVALID_PARAMETER; + } + + if (dataBuffer == nullptr) { + LOGE("OH_NNExecutor_SetInput failed, passed nullptr to dataBuffer."); + return OH_NN_INVALID_PARAMETER; + } + + if (length == 0) { + LOGE("OH_NNExecutor_SetInput failed, dataBuffer length is 0."); + return OH_NN_INVALID_PARAMETER; + } + + Executor* innerExecutor = reinterpret_cast(executor); + return innerExecutor->SetInput(inputIndex, *tensor, dataBuffer, length); +} + +NNRT_API OH_NN_ReturnCode OH_NNExecutor_SetOutput(OH_NNExecutor *executor, + uint32_t outputIndex, + void *dataBuffer, + size_t length) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_SetOutput failed, passed nullptr to executor."); + return OH_NN_INVALID_PARAMETER; + } + + if (dataBuffer == nullptr) { + LOGE("OH_NNExecutor_SetOutput failed, passed nullptr to dataBuffer."); + return OH_NN_INVALID_PARAMETER; + } + + if (length == 0) { + LOGE("OH_NNExecutor_SetOutput failed, dataBuffer length is 0."); + return OH_NN_INVALID_PARAMETER; + } + + Executor* innerExecutor = reinterpret_cast(executor); + return innerExecutor->SetOutput(outputIndex, dataBuffer, length); +} + +NNRT_API OH_NN_ReturnCode OH_NNExecutor_GetOutputShape(OH_NNExecutor *executor, + uint32_t outputIndex, + int32_t **shape, + uint32_t *shapeLength) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_GetOutputShape failed, passed nullptr to executor."); + return OH_NN_INVALID_PARAMETER; + } + + if (shape == nullptr) { + LOGE("OH_NNExecutor_GetOutputShape failed, passed nullptr to shape."); + return OH_NN_INVALID_PARAMETER; + } + + if (*shape != nullptr) { + LOGE("OH_NNExecutor_GetOutputShape failed, *shape is not nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + if (shapeLength == nullptr) { + LOGE("OH_NNExecutor_GetOutputShape failed, passed nullptr to shapeLength."); + return OH_NN_INVALID_PARAMETER; + } + + Executor* innerExecutor = reinterpret_cast(executor); + return innerExecutor->GetOutputShape(outputIndex, shape, *shapeLength); +} + +NNRT_API OH_NN_ReturnCode OH_NNExecutor_Run(OH_NNExecutor *executor) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_Run failed, passed nullptr to executor."); + return OH_NN_INVALID_PARAMETER; + } + + Executor *innerExecutor = reinterpret_cast(executor); + return innerExecutor->Run(); +} + +NNRT_API OH_NN_Memory *OH_NNExecutor_AllocateInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, size_t length) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_AllocateInputMemory failed, passed nullptr to executor."); + return nullptr; + } + + if (length == 0) { + LOGW("OH_NNExecutor_AllocateInputMemory has no effect, passed length equals 0."); + return nullptr; + } + + OH_NN_Memory *nnMemory = nullptr; + Executor *innerExecutor = reinterpret_cast(executor); + OH_NN_ReturnCode ret = innerExecutor->CreateInputMemory(inputIndex, length, &nnMemory); + if (ret != OH_NN_SUCCESS) { + LOGE("OH_NNExecutor_AllocateInputMemory failed, error happened when creating input memory in executor."); + return nullptr; + } + + return nnMemory; +} + +NNRT_API OH_NN_Memory *OH_NNExecutor_AllocateOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, size_t length) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_AllocateOutputMemory failed, passed nullptr to executor."); + return nullptr; + } + + if (length == 0) { + LOGW("OH_NNExecutor_AllocateOutputMemory has no effect, passed length equals 0."); + return nullptr; + } + 
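A short sketch of the copy-based execution path these functions expose; the compilation handle, the input tensor description, and the buffer sizes are assumptions for illustration:

    // Sketch only: run one inference with caller-owned input/output buffers.
    // `compilation` was built beforehand; `inputTensor` is the same OH_NN_Tensor
    // description used when the model was constructed.
    float inputData[4] = {1.0f, 2.0f, 3.0f, 4.0f};
    float outputData[4] = {0.0f};
    int32_t* shape = nullptr;
    uint32_t shapeLength = 0;

    OH_NNExecutor* executor = OH_NNExecutor_Construct(compilation);
    OH_NN_ReturnCode ret = OH_NNExecutor_SetInput(executor, 0, &inputTensor, inputData, sizeof(inputData));
    if (ret == OH_NN_SUCCESS) {
        ret = OH_NNExecutor_SetOutput(executor, 0, outputData, sizeof(outputData));
    }
    if (ret == OH_NN_SUCCESS) {
        ret = OH_NNExecutor_Run(executor);
    }
    if (ret == OH_NN_SUCCESS) {
        // *shape must be nullptr on entry, as the checks above require.
        ret = OH_NNExecutor_GetOutputShape(executor, 0, &shape, &shapeLength);
    }
    OH_NNExecutor_Destroy(&executor);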
+ OH_NN_Memory *nnMemory = nullptr; + Executor *innerExecutor = reinterpret_cast(executor); + OH_NN_ReturnCode ret = innerExecutor->CreateOutputMemory(outputIndex, length, &nnMemory); + if (ret != OH_NN_SUCCESS) { + LOGE("OH_NNExecutor_AllocateOutputMemory failed, error happened when creating output memory in executor."); + return nullptr; + } + + return nnMemory; +} + +NNRT_API void OH_NNExecutor_DestroyInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, OH_NN_Memory **memory) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_DestroyInputMemory failed, passed nullptr to executor."); + return; + } + + if (memory == nullptr) { + LOGW("OH_NNExecutor_DestroyInputMemory has no effect, passed nullptr to memory."); + return; + } + + if (*memory == nullptr) { + LOGW("OH_NNExecutor_DestroyInputMemory has no effect, passed nullptr to *memory."); + return; + } + + Executor *innerExecutor = reinterpret_cast(executor); + OH_NN_ReturnCode ret = innerExecutor->DestroyInputMemory(inputIndex, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("OH_NNExecutor_DestroyInputMemory failed, error happened when destroying input memory."); + return; + } + + *memory = nullptr; +} + +NNRT_API void OH_NNExecutor_DestroyOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, OH_NN_Memory **memory) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_DestroyOutputMemory failed, passed nullptr to executor."); + return; + } + + if (memory == nullptr) { + LOGW("OH_NNExecutor_DestroyOutputMemory has no effect, passed nullptr to memory."); + return; + } + + if (*memory == nullptr) { + LOGW("OH_NNExecutor_DestroyOutputMemory has no effect, passed nullptr to *memory."); + return; + } + + Executor *innerExecutor = reinterpret_cast(executor); + OH_NN_ReturnCode ret = innerExecutor->DestroyOutputMemory(outputIndex, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("OH_NNExecutor_DestroyOutputMemory failed, error happened when destroying output memory."); + return; + } + + *memory = nullptr; +} + +NNRT_API OH_NN_ReturnCode OH_NNExecutor_SetInputWithMemory(OH_NNExecutor *executor, + uint32_t inputIndex, + const OH_NN_Tensor *tensor, + const OH_NN_Memory *memory) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_SetInputWithMemory failed, passed nullptr to executor."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor == nullptr) { + LOGE("OH_NNExecutor_SetInputWithMemory failed, passed nullptr to tensor."); + return OH_NN_INVALID_PARAMETER; + } + + if (memory == nullptr) { + LOGE("OH_NNExecutor_SetInputWithMemory failed, passed nullptr to memory."); + return OH_NN_INVALID_PARAMETER; + } + + Executor *innerExecutor = reinterpret_cast(executor); + return innerExecutor->SetInputFromMemory(inputIndex, *tensor, *memory); +} + +NNRT_API OH_NN_ReturnCode OH_NNExecutor_SetOutputWithMemory(OH_NNExecutor *executor, + uint32_t outputIndex, + const OH_NN_Memory *memory) +{ + if (executor == nullptr) { + LOGE("OH_NNExecutor_SetOutputWithMemory failed, passed nullptr to executor."); + return OH_NN_INVALID_PARAMETER; + } + + if (memory == nullptr) { + LOGE("OH_NNExecutor_SetOutputWithMemory failed, passed nullptr to memory."); + return OH_NN_INVALID_PARAMETER; + } + + Executor *innerExecutor = reinterpret_cast(executor); + return innerExecutor->SetOutputFromMemory(outputIndex, *memory); +} + +NNRT_API void OH_NNExecutor_Destroy(OH_NNExecutor **executor) +{ + if (executor == nullptr) { + LOGW("OH_NNExecutor_Destroy has no effect, since executor is nullptr."); + return; + } + + if ((*executor) == nullptr) { + 
LOGW("OH_NNExecutor_Destroy has no effect, since *executor is nullptr"); + return; + } + + Executor *innerExecutor = reinterpret_cast(*executor); + delete innerExecutor; + *executor = nullptr; +} + +NNRT_API OH_NN_ReturnCode OH_NNDevice_GetAllDevicesID(const size_t **allDevicesID, uint32_t *deviceCount) +{ + if (allDevicesID == nullptr) { + LOGE("OH_NNDevice_GetAllDevicesID failed, passed nullptr to allDevicesID."); + return OH_NN_INVALID_PARAMETER; + } + + if ((*allDevicesID) != nullptr) { + LOGE("OH_NNDevice_GetAllDevicesID failed, *allDevicesID should be nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + if (deviceCount == nullptr) { + LOGE("OH_NNDevice_GetAllDevicesID failed, passed nullptr to deviceCount."); + return OH_NN_INVALID_PARAMETER; + } + + DeviceManager& deviceManager = DeviceManager::GetInstance(); + const std::vector& allDevices = deviceManager.GetAllDeviceId(); + + if (allDevices.empty()) { + LOGW("OH_NNDevice_GetAllDevicesID got no device."); + *allDevicesID = nullptr; + *deviceCount = 0; + return OH_NN_SUCCESS; + } + + *allDevicesID = allDevices.data(); + // allDevices.size() will not exceed UINT32_MAX, it is safe to cast to uint32_t. + *deviceCount = static_cast(allDevices.size()); + + return OH_NN_SUCCESS; +} + +NNRT_API OH_NN_ReturnCode OH_NNDevice_GetName(size_t deviceID, const char **name) +{ + if (name == nullptr) { + LOGE("OH_NNDevice_GetName failed, passed nullptr to name."); + return OH_NN_INVALID_PARAMETER; + } + + if ((*name) != nullptr) { + LOGE("OH_NNDevice_GetName failed, *name should be nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + DeviceManager& deviceManager = DeviceManager::GetInstance(); + const std::string& deviceName = deviceManager.GetDeviceName(deviceID); + if (deviceName.empty()) { + LOGE("OH_NNDevice_GetName failed, error happened when getting name of deviceID %zu.", deviceID); + *name = nullptr; + return OH_NN_FAILED; + } + + *name = deviceName.data(); + return OH_NN_SUCCESS; +} + +NNRT_API OH_NN_ReturnCode OH_NNDevice_GetType(size_t deviceID, OH_NN_DeviceType* deviceType) +{ + DeviceManager& deviceManager = DeviceManager::GetInstance(); + std::shared_ptr device = deviceManager.GetDevice(deviceID); + if (device == nullptr) { + LOGE("OH_NNDevice_GetName failed, passed invalid deviceID."); + return OH_NN_INVALID_PARAMETER; + } + + if (deviceType == nullptr) { + LOGE("OH_NNDevice_GetType failed, passed nullptr to deviceType."); + return OH_NN_INVALID_PARAMETER; + } + + OH_NN_ReturnCode ret = device->GetDeviceType(*deviceType); + if (ret != OH_NN_SUCCESS) { + LOGE("OH_NNDevice_GetType failed, device id: %zu.", deviceID); + return ret; + } + return OH_NN_SUCCESS; +} \ No newline at end of file diff --git a/frameworks/native/nn_tensor.cpp b/frameworks/native/nn_tensor.cpp new file mode 100644 index 0000000..68f392a --- /dev/null +++ b/frameworks/native/nn_tensor.cpp @@ -0,0 +1,409 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include "nn_tensor.h" +#include "validation.h" +#include "transform.h" +#include "common/log.h" +#include "mindir.h" +#include "mindir_types.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +const uint32_t SUPPORT_NUM_BIT = 8; // Currently support 8-bit quantization only +const uint32_t INVALID_NUM_BIT = 0; + +void DestroyLiteGraphTensor(void* tensor) +{ + mindspore::lite::MindIR_Tensor_Destroy(&tensor); +} + +NNTensor::~NNTensor() +{ + if (m_buffer != nullptr) { + delete [] reinterpret_cast(m_buffer); + } +} + +NNTensor::NNTensor(NNTensor&& tensor) noexcept +{ + *this = std::move(tensor); +} + +NNTensor& NNTensor::operator=(NNTensor&& tensor) noexcept +{ + if (this == &tensor) { + return *this; + } + + m_type = tensor.m_type; + m_dataType = tensor.m_dataType; + m_format = tensor.m_format; + m_name = std::move(tensor.m_name); + m_dimensions = std::move(tensor.m_dimensions); + m_quantParams = std::move(tensor.m_quantParams); + m_elementCount = tensor.m_elementCount; + m_isDynamicShape = tensor.m_isDynamicShape; + m_isOpParameter = tensor.m_isOpParameter; + m_buffer = tensor.m_buffer; + m_bufferLength = tensor.m_bufferLength; + m_dataLength = tensor.m_dataLength; + + tensor.m_buffer = nullptr; + tensor.m_bufferLength = 0; + tensor.m_dataLength = 0; + + return *this; +} + +OH_NN_ReturnCode NNTensor::Build(OH_NN_DataType dataType, + const std::vector& dimensions, + const std::vector& quantParam, + OH_NN_TensorType type) +{ + m_type = type; + + if (!Validation::ValidateTensorDataType(dataType)) { + LOGE("Build failed, passed invalid data type."); + return OH_NN_INVALID_PARAMETER; + } + m_dataType = dataType; + + OH_NN_ReturnCode ret = ParseDimensions(dimensions); + if (ret != OH_NN_SUCCESS) { + LOGE("Build failed, passed invalid dimensions."); + return ret; + } + + ret = ParseQuantParams(quantParam); + if (ret != OH_NN_SUCCESS) { + LOGE("Build failed, please check quantParam."); + return ret; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode NNTensor::BuildFromOHNNTensor(const OH_NN_Tensor& nnTensor) +{ + m_type = nnTensor.type; + + if (!Validation::ValidateTensorDataType(nnTensor.dataType)) { + LOGE("BuildFromOHNNTensor failed, passed invalid data type: %d.", nnTensor.dataType); + return OH_NN_INVALID_PARAMETER; + } + m_dataType = nnTensor.dataType; + + if (!Validation::ValidateTensorType(nnTensor.type)) { + LOGE("BuildFromOHNNTensor failed, passed invalid nnTensor type: %d.", nnTensor.type); + return OH_NN_INVALID_PARAMETER; + } + + OH_NN_ReturnCode ret = ParseDimensions(nnTensor); + if (ret != OH_NN_SUCCESS) { + LOGE("BuildFromOHNNTensor failed, passed invalid nnTensor dimensions."); + return ret; + } + + ret = ParseQuantParams(nnTensor.quantParam); + if (ret != OH_NN_SUCCESS) { + LOGE("BuildFromOHNNTensor failed, please check quantParam in nnTensor."); + return ret; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode NNTensor::ParseDimensions(const std::vector& dimensions) +{ + // Temporary variable to check overflow. 
+ uint64_t absoluteDim {0}; + uint64_t elementCount {1}; + uint64_t dataLength {static_cast(GetTypeSize(m_dataType))}; + m_isDynamicShape = false; + for (int32_t dim : dimensions) { + if (dim < -1 || dim == 0) { + LOGE("ParseDimension failed, dimension of OH_NN_Tensor cannot be 0 or less than -1, receive %d.", dim); + return OH_NN_INVALID_PARAMETER; + } + + m_isDynamicShape = m_isDynamicShape || (dim == -1); + absoluteDim = static_cast(abs(dim)); + elementCount *= absoluteDim; + dataLength *= absoluteDim; + + if (dataLength > UINT32_MAX) { + LOGE("ParseDimension failed, expected data length of tensor exceed limit %u.", UINT32_MAX); + return OH_NN_INVALID_PARAMETER; + } + } + + if (m_isDynamicShape) { + // If tensor has dynamic shape, m_elementCount and m_dataLength take 0. + m_elementCount = 0; + m_dataLength = 0; + } else { + m_elementCount = static_cast(elementCount); + m_dataLength = static_cast(dataLength); + } + + m_dimensions = std::move(dimensions); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode NNTensor::ParseDimensions(const OH_NN_Tensor& nnTensor) +{ + OH_NN_ReturnCode ret = Validation::ValidateArray(nnTensor.dimensions, nnTensor.dimensionCount); + if (ret != OH_NN_SUCCESS) { + LOGE("BuildFromOHNNTensor failed, please check dimension and dimensionCount in NNTensor."); + return ret; + } + std::vector dimensions = ConstructVectorFromArray(nnTensor.dimensions, nnTensor.dimensionCount); + + ret = ParseDimensions(dimensions); + if (ret != OH_NN_SUCCESS) { + LOGE("BuildFromOHNNTensor failed, passed invalid dimension info."); + return ret; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode NNTensor::ParseQuantParams(const OH_NN_QuantParam* quantParam) +{ + if (quantParam == nullptr) { + return OH_NN_SUCCESS; + } + + if ((quantParam->numBits == nullptr) || (quantParam->scale == nullptr) || (quantParam->zeroPoint == nullptr)) { + LOGE("ParseQuantParams failed, scale or zeroPoint is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + std::vector tmpQuantParam; + uint32_t numBits{0}; + double scale{0.0}; + int32_t zeroPoint{0}; + for (uint32_t i = 0; i < quantParam->quantCount; i++) { + numBits = quantParam->numBits[i]; + scale = quantParam->scale[i]; + zeroPoint = quantParam->zeroPoint[i]; + tmpQuantParam.emplace_back((QuantParam){numBits, scale, zeroPoint}); + } + + OH_NN_ReturnCode ret = ParseQuantParams(tmpQuantParam); + if (ret != OH_NN_SUCCESS) { + LOGE("ParseQuantParams failed, please numBits in NNTensor."); + return ret; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode NNTensor::ParseQuantParams(const std::vector& quantParams) +{ + for (const QuantParam& param : quantParams) { + // Only support 8-bit quantization in NNR version 1.0 + if ((param.numBits != SUPPORT_NUM_BIT) || (param.numBits == INVALID_NUM_BIT)) { + LOGE("ParseQuantParams failed, get invalid numBits %d.", param.numBits); + return OH_NN_INVALID_PARAMETER; + } + } + + m_quantParams = quantParams; + return OH_NN_SUCCESS; +} + +void NNTensor::IdentifyOpParameter() +{ + m_isOpParameter = true; +} + +void NNTensor::SetName(const std::string& name) +{ + m_name = name; +} + +// Buffer set inside NNTensor will be released during deconstruction, make sure the buffer won't be released twice. 
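To make the dimension rules above concrete, a small sketch, assuming GetTypeSize(OH_NN_FLOAT32) is 4 bytes:

    // Sketch only: a fixed shape {2, 3, 4} yields 24 elements and 96 bytes of
    // data; any -1 entry marks the tensor dynamic and forces both fields to 0
    // until SetDimensions() provides a concrete shape.
    NNTensor tensor;
    std::vector<int32_t> dims = {2, 3, 4};
    OH_NN_ReturnCode ret = tensor.Build(OH_NN_FLOAT32, dims, {}, OH_NN_TENSOR);
    // When ret == OH_NN_SUCCESS: tensor.GetElementCount() == 24,
    // tensor.GetDataLength() == 96, tensor.IsDynamicShape() == false.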
+void NNTensor::SetBuffer(const void* buffer, size_t length) +{ + // copy pointer instead of memory copying + m_buffer = const_cast(buffer); + m_bufferLength = length; +} + +OH_NN_ReturnCode NNTensor::SetDimensions(const std::vector& dimensions) +{ + size_t expectedDimensionCount = m_dimensions.size(); + size_t dimensionCount = dimensions.size(); + if (dimensionCount != expectedDimensionCount) { + LOGE("Passed dimensions have different dimension counts from NNTensor, expected %zu, but passed %zu.", + expectedDimensionCount, dimensionCount); + return OH_NN_INVALID_PARAMETER; + } + + auto ret = ParseDimensions(dimensions); + if (ret != OH_NN_SUCCESS) { + LOGE("SetDimemsions failed, passed invalid dimension info."); + return ret; + } + + m_dimensions = dimensions; + return OH_NN_SUCCESS; +} + +OH_NN_TensorType NNTensor::GetType() const +{ + return m_type; +} + +std::string NNTensor::GetName() const +{ + return m_name; +} + +void* NNTensor::GetBuffer() const +{ + return m_buffer; +} + +size_t NNTensor::GetBufferLength() const +{ + return m_bufferLength; +} + +size_t NNTensor::GetDataLength() const +{ + return m_dataLength; +} + +OH_NN_DataType NNTensor::GetDataType() const +{ + return m_dataType; +} + +uint32_t NNTensor::GetElementCount() const +{ + return m_elementCount; +} + +std::vector NNTensor::GetDimensions() const +{ + return m_dimensions; +} + +OH_NN_Format NNTensor::GetFormat() const +{ + return m_format; +} + +std::vector NNTensor::GetQuantParam() const +{ + return m_quantParams; +} + +LiteGraphTensorPtr NNTensor::ConvertToLiteGraphTensor() const +{ + mindspore::lite::DataType dataType = NNToMS::TransformDataType(m_dataType); + mindspore::lite::Format format = NNToMS::TransformFormat(m_format); + const uint8_t* buffer = static_cast(m_buffer); + std::vector data = ConstructVectorFromArray(buffer, m_dataLength); + + std::vector quantParams; + mindspore::lite::QuantParam msQuantParam; + for (const QuantParam& param : m_quantParams) { + msQuantParam = {param.zeroPoint, param.scale, param.numBits}; + quantParams.emplace_back(std::move(msQuantParam)); + } + + mindspore::lite::TensorPtr tensor = mindspore::lite::MindIR_Tensor_Create( + m_name, dataType, m_dimensions, format, data, quantParams); + if (tensor == nullptr) { + LOGE("ConvertToLiteGraphTensor failed, please check attributes of NNTensor."); + return {nullptr, DestroyLiteGraphTensor}; + } + + LiteGraphTensorPtr liteGraphTensor(tensor, DestroyLiteGraphTensor); + return liteGraphTensor; +} + +void NNTensor::ConvertToIOTensor(IOTensor& tensor) const +{ + tensor.dataType = m_dataType; + tensor.format = m_format; + tensor.dimensions = m_dimensions; + tensor.data = const_cast(m_buffer); + tensor.length = m_bufferLength; +} + +bool NNTensor::IsDynamicShape() const +{ + return m_isDynamicShape; +} + +bool NNTensor::IsQuantTensor() const +{ + return (m_quantParams.size() > 0); +} + +bool NNTensor::IsScalar() const +{ + return (m_dimensions.empty()); +} + +bool NNTensor::IsOpParameter() const +{ + return m_isOpParameter; +} + +bool NNTensor::CompareAttribute(const NNTensor& tensor) const +{ + if (m_dataType != tensor.GetDataType()) { + LOGI("Tensors have different data type: %d and %d.", m_dataType, tensor.GetDataType()); + return false; + } + + if (m_format != tensor.GetFormat()) { + LOGI("Tensors have different format: %d and %d.", m_format, tensor.GetFormat()); + return false; + } + + const std::vector dimensions = tensor.GetDimensions(); + if (m_dimensions.size() != dimensions.size()) { + LOGI("Tensors have differents dimension counts: 
%zu and %zu.", m_dimensions.size(), dimensions.size()); + return false; + } + + for (auto i = 0; i < dimensions.size(); i++) { + if (m_dimensions[i] != -1 && m_dimensions[i] != dimensions[i]) { + LOGI("Tensors have different dimension: dimension index: %u, dimension value: %d and %d.", + i, m_dimensions[i], dimensions[i]); + return false; + } + } + + if (m_type != tensor.GetType()) { + LOGI("Tensors have different type: %d and %d.", m_type, tensor.GetType()); + return false; + } + + return true; +} +} // NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/nn_tensor.h b/frameworks/native/nn_tensor.h new file mode 100644 index 0000000..1b8cf20 --- /dev/null +++ b/frameworks/native/nn_tensor.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_NN_TENSOR_H +#define NEURAL_NETWORK_RUNTIME_NN_TENSOR_H + +#include +#include + +#include "interfaces/oem/cpp_api/cpp_type.h" +#include "interfaces/kits/c/neural_network_runtime.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +using LiteGraphTensorPtr = std::unique_ptr; + +void DestroyLiteGraphTensor(void* tensor); + +class NNTensor { +public: + NNTensor() = default; + ~NNTensor(); + NNTensor(NNTensor&& tensor) noexcept; + NNTensor& operator=(NNTensor&& tensor) noexcept; + // Copy construction and assignment is not allowed in case of double-free of m_buffer + NNTensor(const NNTensor& tensor) = delete; + NNTensor& operator=(const NNTensor& tensor) = delete; + + OH_NN_ReturnCode BuildFromOHNNTensor(const OH_NN_Tensor& nnTensor); + OH_NN_ReturnCode Build(OH_NN_DataType dataType, + const std::vector& dimensions, + const std::vector& quantParam, + OH_NN_TensorType type); + void IdentifyOpParameter(); + + void SetName(const std::string& name); + void SetBuffer(const void* buffer, size_t length); + OH_NN_ReturnCode SetDimensions(const std::vector& dimensions); + + std::string GetName() const; + OH_NN_TensorType GetType() const; + void* GetBuffer() const; + // Return complete buffer length + size_t GetBufferLength() const; + // Return actual data length, since the data can be store in a larger buffer + size_t GetDataLength() const; + OH_NN_DataType GetDataType() const; + uint32_t GetElementCount() const; + std::vector GetDimensions() const; + OH_NN_Format GetFormat() const; + std::vector GetQuantParam() const; + LiteGraphTensorPtr ConvertToLiteGraphTensor() const; + void ConvertToIOTensor(IOTensor& tensor) const; + + bool IsDynamicShape() const; + bool IsQuantTensor() const; + bool IsScalar() const; + bool IsOpParameter() const; + bool CompareAttribute(const NNTensor& tensor) const; + +private: + // Used in BuildFromOHNNTensor() + OH_NN_ReturnCode ParseQuantParams(const OH_NN_QuantParam* quantParams); + OH_NN_ReturnCode ParseDimensions(const OH_NN_Tensor& nnTensor); + // Used in Build() + OH_NN_ReturnCode ParseQuantParams(const std::vector& quantParams); + OH_NN_ReturnCode ParseDimensions(const std::vector& 
dimensions); + +private: + OH_NN_TensorType m_type {OH_NN_TENSOR}; + OH_NN_DataType m_dataType {OH_NN_FLOAT32}; + OH_NN_Format m_format {OH_NN_FORMAT_NHWC}; + std::string m_name; + std::vector m_dimensions; + std::vector m_quantParams; + uint32_t m_elementCount {0}; + bool m_isDynamicShape {false}; + bool m_isOpParameter {false}; + void* m_buffer {nullptr}; + size_t m_bufferLength {0}; + size_t m_dataLength {0}; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_NN_TENSOR_H \ No newline at end of file diff --git a/frameworks/native/ops/add_builder.cpp b/frameworks/native/ops/add_builder.cpp new file mode 100644 index 0000000..96dd295 --- /dev/null +++ b/frameworks/native/ops/add_builder.cpp @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "add_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Add"; + +AddBuilder::AddBuilder() {} + +AddBuilder::~AddBuilder() {} + +OH_NN_ReturnCode AddBuilder::SetActivation(std::shared_ptr& tensor) +{ + tensor->IdentifyOpParameter(); + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[Add] SetActivation failed, the activationType should be type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Add] SetActivation GetBuffer return nullptr."); + return OH_NN_INVALID_PARAMETER; + } + int8_t* fuseData = static_cast(buffer); + if (!Validation::ValidateFuseType(static_cast(*fuseData))) { + LOGE("[Add] SetActivation failed, fuse activation type is invalid."); + return OH_NN_INVALID_PARAMETER; + } + m_activationType = NNToMS::TransfromFusionType(static_cast(*fuseData)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode AddBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Add] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Add] Build failed, the input or output index of Add operation is invalid."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (uint32_t i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_ADD_ACTIVATIONTYPE: + ret = SetActivation(tensor); + break; + default: + LOGE("[Add] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (ret != OH_NN_SUCCESS) { + LOGE("[Add] Build failed, passed invalid param."); + return ret; + } + } + + // The quantization type of the 
first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr AddBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Add] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_AddFusion_CreatePrimitive(m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(AddBuilder, OH_NN_OPS_ADD); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/add_builder.h b/frameworks/native/ops/add_builder.h new file mode 100644 index 0000000..c08d4d9 --- /dev/null +++ b/frameworks/native/ops/add_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_ADD_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_ADD_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class AddBuilder : public OpsBuilder { +public: + AddBuilder(); + ~AddBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetActivation(std::shared_ptr& tensor); + +private: + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_ADD_BUILDER_H diff --git a/frameworks/native/ops/argmax_builder.cpp b/frameworks/native/ops/argmax_builder.cpp new file mode 100644 index 0000000..d40f3a8 --- /dev/null +++ b/frameworks/native/ops/argmax_builder.cpp @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "argmax_builder.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace Ops {
+static const int INPUT_NUM = 1;
+static const int OUTPUT_NUM = 1;
+static const std::string OP_NAME = "ArgMax";
+
+ArgMaxBuilder::ArgMaxBuilder() {}
+
+ArgMaxBuilder::~ArgMaxBuilder() {}
+
+OH_NN_ReturnCode ArgMaxBuilder::SetAxis(std::shared_ptr<NNTensor> tensor)
+{
+    tensor->IdentifyOpParameter();
+
+    if (tensor->GetDataType() != OH_NN_INT64) {
+        LOGE("[ArgMax] SetAxis failed, the axis should be type OH_NN_INT64.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    void* buffer = tensor->GetBuffer();
+    if (buffer == nullptr) {
+        LOGE("[ArgMax] SetAxis GetBuffer return nullptr.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    m_axis = *(static_cast<int64_t*>(buffer));
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode ArgMaxBuilder::SetKeepdims(std::shared_ptr<NNTensor> tensor)
+{
+    tensor->IdentifyOpParameter();
+
+    if (tensor->GetDataType() != OH_NN_BOOL) {
+        LOGE("[ArgMax] SetKeepdims failed, the keep_dims should be type OH_NN_BOOL.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    void* buffer = tensor->GetBuffer();
+    if (buffer == nullptr) {
+        LOGE("[ArgMax] SetKeepdims GetBuffer return nullptr.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+    m_keepDims = *(static_cast<bool*>(buffer));
+
+    return OH_NN_SUCCESS;
+}
+
+/**
+ * Build method.
+ * 1. build primitive of ops.
+ * 2. build inputIndex of ops.
+ * 3. build outputIndex of ops.
+ */
+OH_NN_ReturnCode ArgMaxBuilder::Build(const std::vector<uint32_t>& paramsIndex,
+    const std::vector<uint32_t>& inputsIndex, const std::vector<uint32_t>& outputsIndex,
+    const std::vector<std::shared_ptr<NNTensor>>& allTensors)
+{
+    if (m_isBuild) {
+        LOGE("[ArgMax] Build failed, build operation has been completed, cannot build again.");
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
+
+    OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM);
+    if (returnCode != OH_NN_SUCCESS) {
+        LOGE("[ArgMax] Build failed, passed invalid input or output index.");
+        return returnCode;
+    }
+    m_inputsIndex = inputsIndex;
+    m_outputsIndex = outputsIndex;
+
+    for (int i : paramsIndex) {
+        const std::shared_ptr<NNTensor> tensor = allTensors[i];
+        switch (tensor->GetType()) {
+            case OH_NN_ARG_MAX_AXIS:
+                returnCode = SetAxis(tensor);
+                break;
+            case OH_NN_ARG_MAX_KEEPDIMS:
+                returnCode = SetKeepdims(tensor);
+                break;
+            default:
+                LOGE("[ArgMax] Build failed, param invalid, type = %d.", tensor->GetType());
+                return OH_NN_INVALID_PARAMETER;
+        }
+        if (returnCode != OH_NN_SUCCESS) {
+            LOGE("[ArgMax] Build failed, passed invalid param.");
+            return returnCode;
+        }
+    }
+
+    // The quantization type of the first output determines that of the operator.
+ SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ArgMaxBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ArgMax] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_ArgMaxFusion_CreatePrimitive(m_axis, m_topK, m_keepDims, m_outMaxValue); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} +REGISTER_OPS(ArgMaxBuilder, OH_NN_OPS_ARG_MAX); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/argmax_builder.h b/frameworks/native/ops/argmax_builder.h new file mode 100644 index 0000000..73139e3 --- /dev/null +++ b/frameworks/native/ops/argmax_builder.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_ARGMAX_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_ARGMAX_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ArgMaxBuilder : public OpsBuilder { +public: + ArgMaxBuilder(); + ~ArgMaxBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetKeepdims(std::shared_ptr tensor); + +private: + int64_t m_axis {-1}; + int64_t m_topK {1}; + bool m_keepDims {false}; + bool m_outMaxValue {false}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_ARGMAX_BUILDER_H diff --git a/frameworks/native/ops/avgpool_builder.cpp b/frameworks/native/ops/avgpool_builder.cpp new file mode 100644 index 0000000..02c78a7 --- /dev/null +++ b/frameworks/native/ops/avgpool_builder.cpp @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
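For reference, a sketch of how a caller supplies the ArgMax parameters consumed above through the C API; the model handle and the tensor indices are assumptions for illustration:

    // Sketch only: add the axis and keep_dims parameter tensors, then set their data.
    int64_t axis = 1;
    bool keepDims = false;
    OH_NN_Tensor axisTensor = {OH_NN_INT64, 0, nullptr, nullptr, OH_NN_ARG_MAX_AXIS};
    OH_NN_Tensor keepDimsTensor = {OH_NN_BOOL, 0, nullptr, nullptr, OH_NN_ARG_MAX_KEEPDIMS};
    (void)OH_NNModel_AddTensor(model, &axisTensor);      // assume this becomes tensor 1
    (void)OH_NNModel_AddTensor(model, &keepDimsTensor);  // assume this becomes tensor 2
    (void)OH_NNModel_SetTensorData(model, 1, &axis, sizeof(axis));
    (void)OH_NNModel_SetTensorData(model, 2, &keepDims, sizeof(keepDims));
    // Both indices then go into paramIndices of OH_NNModel_AddOperation with
    // OH_NN_OPS_ARG_MAX, and reach SetAxis()/SetKeepdims() during Build().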
+ */ + +#include "avgpool_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const std::string OP_NAME = "AvgPool"; + +AvgPoolBuilder::AvgPoolBuilder() {} + +AvgPoolBuilder::~AvgPoolBuilder() {} + +OH_NN_ReturnCode AvgPoolBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + OH_NN_ReturnCode returnCode = PoolingBuild(paramsIndex, inputsIndex, outputsIndex, allTensors); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[AvgPool] Build failed, the PoolingBuild failed."); + return returnCode; + } + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr AvgPoolBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[AvgPool] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_AvgPoolFusion_CreatePrimitive(m_kernelSize, m_strides, m_pad, + m_padMode, m_roundMode, m_format, m_global, m_activationType); + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(AvgPoolBuilder, OH_NN_OPS_AVG_POOL); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/avgpool_builder.h b/frameworks/native/ops/avgpool_builder.h new file mode 100644 index 0000000..fc58d41 --- /dev/null +++ b/frameworks/native/ops/avgpool_builder.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_AVGPOOL_BUILDER_H +#define NEURAL_NETWORK_AVGPOOL_BUILDER_H + +#include "pooling_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class AvgPoolBuilder : public PoolingBuilder { +public: + AvgPoolBuilder(); + ~AvgPoolBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_AVGPOOL_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/batch_to_space_nd_builder.cpp b/frameworks/native/ops/batch_to_space_nd_builder.cpp new file mode 100644 index 0000000..b56ffcb --- /dev/null +++ b/frameworks/native/ops/batch_to_space_nd_builder.cpp @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "batch_to_space_nd_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const int CROPS_ROWS = 2; +static const int CROPS_COLUMN = 2; +static const std::string OP_NAME = "BatchToSpaceND"; + +BatchToSpaceNDBuilder::BatchToSpaceNDBuilder() {} + +BatchToSpaceNDBuilder::~BatchToSpaceNDBuilder() {} + +OH_NN_ReturnCode BatchToSpaceNDBuilder::SetInputBlock(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[BatchToSpaceND] SetInputBlock failed, the BlockSize should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[BatchToSpaceND] SetInputBlock GetBuffer return nullptr."); + return OH_NN_INVALID_PARAMETER; + } + int64_t* pBlockSize = static_cast(buffer); + + uint32_t elementCount = tensor->GetElementCount(); + for (uint32_t i = 0; i < elementCount; ++i) { + m_blockSize.emplace_back(*pBlockSize); + ++pBlockSize; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode BatchToSpaceNDBuilder::SetInputCrops(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[BatchToSpaceND] SetInputCrops failed, the Crops should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[BatchToSpaceND] SetInputCrops GetBuffer return nullptr."); + return OH_NN_INVALID_PARAMETER; + } + int64_t* pCropsData = static_cast(buffer); + + std::vector> cropsData; + for (int i = 0; i < CROPS_ROWS; i++) { + std::vector vect_data; + vect_data.reserve(CROPS_COLUMN); + for (int j = 0; j < CROPS_COLUMN; j++) { + vect_data.push_back(*pCropsData++); + } + cropsData.push_back(vect_data); + } + m_crops = cropsData; + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode BatchToSpaceNDBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[BatchToSpaceND] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[BatchToSpaceND] Build failed, passed invalid input or output index."); + return returnCode; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE: + returnCode = SetInputBlock(tensor); + break; + case OH_NN_BATCH_TO_SPACE_ND_CROPS: + returnCode = SetInputCrops(tensor); + break; + default: + LOGE("[BatchToSpaceND] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (returnCode != OH_NN_SUCCESS) { + LOGE("[BatchToSpaceND] Build failed, passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that 
of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr BatchToSpaceNDBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[BatchToSpaceND] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_BatchToSpaceND_CreatePrimitive(m_blockSize, m_crops); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(BatchToSpaceNDBuilder, OH_NN_OPS_BATCH_TO_SPACE_ND); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/batch_to_space_nd_builder.h b/frameworks/native/ops/batch_to_space_nd_builder.h new file mode 100644 index 0000000..beab53a --- /dev/null +++ b/frameworks/native/ops/batch_to_space_nd_builder.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_BATCHTOSPACEND_BUILDER_H +#define NEURAL_NETWORK_BATCHTOSPACEND_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class BatchToSpaceNDBuilder : public OpsBuilder { +public: + BatchToSpaceNDBuilder(); + ~BatchToSpaceNDBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetBatchToSpaceInput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors); + OH_NN_ReturnCode SetInputBlock(std::shared_ptr tensor); + OH_NN_ReturnCode SetInputCrops(std::shared_ptr tensor); + +private: + std::vector m_blockSize; + std::vector> m_crops; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_BATCHTOSPACEND_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/batchnorm_builder.cpp b/frameworks/native/ops/batchnorm_builder.cpp new file mode 100644 index 0000000..1fd5997 --- /dev/null +++ b/frameworks/native/ops/batchnorm_builder.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "batchnorm_builder.h"
+
+#include "mindir.h"
+
+#include "frameworks/native/ops_registry.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace Ops {
+static const int INPUT_NUM = 5;
+static const int OUTPUT_NUM = 1;
+static const int SCALAR_LENGTH = 1;
+const std::string OP_NAME = "BatchNorm";
+
+BatchNormBuilder::BatchNormBuilder() {}
+
+BatchNormBuilder::~BatchNormBuilder() {}
+
+OH_NN_ReturnCode BatchNormBuilder::SetEpsilon(std::shared_ptr<NNTensor> tensor)
+{
+    tensor->IdentifyOpParameter();
+    if (tensor->GetDataType() != OH_NN_FLOAT32) {
+        LOGE("[BatchNorm] SetEpsilon failed, the epsilon should be type OH_NN_FLOAT32.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (tensor->GetElementCount() != SCALAR_LENGTH) {
+        LOGE("[BatchNorm] SetEpsilon failed, the epsilon should be a scalar.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    void* buffer = tensor->GetBuffer();
+    if (buffer == nullptr) {
+        LOGE("[BatchNorm] SetEpsilon failed, the epsilon passed an empty buffer.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    m_epsilon = *static_cast<float*>(buffer);
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode BatchNormBuilder::Build(const std::vector<uint32_t>& paramsIndex,
+    const std::vector<uint32_t>& inputsIndex,
+    const std::vector<uint32_t>& outputsIndex,
+    const std::vector<std::shared_ptr<NNTensor>>& allTensors)
+{
+    if (m_isBuild) {
+        LOGE("[BatchNorm] Build failed, batchNorm operation has been built, cannot build again.");
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
+
+    OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM);
+    if (returnCode != OH_NN_SUCCESS) {
+        LOGE("[BatchNorm] Build failed, passed invalid input or output index.");
+        return returnCode;
+    }
+
+    m_inputsIndex = inputsIndex;
+    m_outputsIndex = outputsIndex;
+
+    for (int i : paramsIndex) {
+        std::shared_ptr<NNTensor> tensor = allTensors[i];
+        switch (tensor->GetType()) {
+            case OH_NN_BATCH_NORM_EPSILON:
+                returnCode = SetEpsilon(tensor);
+                break;
+            default:
+                LOGE("[BatchNorm] Parameter type is invalid, type = %d.", tensor->GetType());
+                return OH_NN_INVALID_PARAMETER;
+        }
+
+        if (returnCode != OH_NN_SUCCESS) {
+            LOGE("[BatchNorm] Build failed, passed invalid param.");
+            return returnCode;
+        }
+    }
+
+    // The quantization type of the first output determines that of the operator.
+    SetQuantType(outputsIndex, allTensors);
+
+    m_isBuild = true;
+    m_name = OP_NAME;
+    return OH_NN_SUCCESS;
+}
+
+LiteGraphPrimitvePtr BatchNormBuilder::GetPrimitive()
+{
+    if (!m_isBuild) {
+        LOGE("[BatchNorm] GetPrimitive failed, cannot get primitive before call build.");
+        return {nullptr, DestroyLiteGraphPrimitive};
+    }
+
+    void* primitive = mindspore::lite::MindIR_FusedBatchNorm_CreatePrimitive(m_epsilon);
+    LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive);
+    return graphPrimitivePtr;
+}
+
+REGISTER_OPS(BatchNormBuilder, OH_NN_OPS_BATCH_NORM);
+} // namespace Ops
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/frameworks/native/ops/batchnorm_builder.h b/frameworks/native/ops/batchnorm_builder.h
new file mode 100644
index 0000000..c8b4e9c
--- /dev/null
+++ b/frameworks/native/ops/batchnorm_builder.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_BATHNORM_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_BATHNORM_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class BatchNormBuilder : public OpsBuilder { +public: + BatchNormBuilder(); + ~BatchNormBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetEpsilon(std::shared_ptr tensor); + +private: + float m_epsilon{1e-7}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_BATHNORM_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/bias_add_builder.cpp b/frameworks/native/ops/bias_add_builder.cpp new file mode 100644 index 0000000..4130bd6 --- /dev/null +++ b/frameworks/native/ops/bias_add_builder.cpp @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "bias_add_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "BiasAdd"; + +BiasAddBuilder::BiasAddBuilder() {} + +BiasAddBuilder::~BiasAddBuilder() {} + +OH_NN_ReturnCode BiasAddBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[BiasAdd] Build failed, biasAdd operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[BiasAdd] Build failed, passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + if (!paramsIndex.empty()) { + LOGE("[BiasAdd] Build failed, expects no parameters"); + return OH_NN_INVALID_PARAMETER; + } + + // The quantization type of the first output determinies that of the operator. 
+ SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr BiasAddBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[BiasAdd] Build failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_BiasAdd_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(BiasAddBuilder, OH_NN_OPS_BIAS_ADD); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/bias_add_builder.h b/frameworks/native/ops/bias_add_builder.h new file mode 100644 index 0000000..410bfe8 --- /dev/null +++ b/frameworks/native/ops/bias_add_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_BIASADD_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_BIASADD_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class BiasAddBuilder : public OpsBuilder { +public: + BiasAddBuilder(); + ~BiasAddBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_BIASADD_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/cast_builder.cpp b/frameworks/native/ops/cast_builder.cpp new file mode 100644 index 0000000..81dc1eb --- /dev/null +++ b/frameworks/native/ops/cast_builder.cpp @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "cast_builder.h"
+
+#include "frameworks/native/transform.h"
+#include "frameworks/native/validation.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace Ops {
+static const int INPUT_NUM = 2;
+static const int OUTPUT_NUM = 1;
+static const int INPUT_TYPE = 1;
+static const std::string OP_NAME = "Cast";
+
+CastBuilder::CastBuilder() {}
+
+CastBuilder::~CastBuilder() {}
+
+OH_NN_ReturnCode CastBuilder::Build(const std::vector<uint32_t>& paramsIndex,
+    const std::vector<uint32_t>& inputsIndex,
+    const std::vector<uint32_t>& outputsIndex,
+    const std::vector<std::shared_ptr<NNTensor>>& allTensors)
+{
+    if (m_isBuild) {
+        LOGE("[Cast] Build failed, operation has been built, cannot build again.");
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
+    auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("[Cast] Build failed, the input or output index of Cast operation is invalid.");
+        return ret;
+    }
+    m_inputsIndex = inputsIndex;
+    m_outputsIndex = outputsIndex;
+
+    auto castType = allTensors[inputsIndex[INPUT_TYPE]]->GetBuffer();
+    if (castType == nullptr) {
+        LOGE("[Cast] Build castType GetBuffer return nullptr.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+    OH_NN_DataType* castTypeInt = reinterpret_cast<OH_NN_DataType*>(castType);
+    if (!Validation::ValidateTensorDataType(*castTypeInt)) {
+        LOGE("[Cast] Build failed, the data type of the Cast operator is invalid.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+    *castTypeInt = (OH_NN_DataType)NNToHDI::TransDataType(*castTypeInt);
+
+    if (!paramsIndex.empty()) {
+        LOGE("[Cast] Build failed, Cast expects no parameters.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    // The quantization type of the first output determines that of the operator.
+    SetQuantType(outputsIndex, allTensors);
+    m_isBuild = true;
+    m_name = OP_NAME;
+    return OH_NN_SUCCESS;
+}
+
+LiteGraphPrimitvePtr CastBuilder::GetPrimitive()
+{
+    if (!m_isBuild) {
+        LOGE("[Cast] Cannot get primitive before call build.");
+        return {nullptr, DestroyLiteGraphPrimitive};
+    }
+
+    void* primitive = mindspore::lite::MindIR_Cast_CreatePrimitive();
+    LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive);
+    return graphPrimitivePtr;
+}
+
+REGISTER_OPS(CastBuilder, OH_NN_OPS_CAST);
+} // namespace Ops
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/frameworks/native/ops/cast_builder.h b/frameworks/native/ops/cast_builder.h
new file mode 100644
index 0000000..09682db
--- /dev/null
+++ b/frameworks/native/ops/cast_builder.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "concat_builder.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace Ops {
+static constexpr int MINIMUM_INPUT = 2;
+static constexpr int OUTPUT_NUM = 1;
+static constexpr int AXIS_LENGTH = 1;
+static const std::string OP_NAME = "Concat";
+
+ConcatBuilder::ConcatBuilder() {}
+
+ConcatBuilder::~ConcatBuilder() {}
+
+OH_NN_ReturnCode ConcatBuilder::SetAxis(std::shared_ptr<NNTensor> tensor)
+{
+    tensor->IdentifyOpParameter();
+
+    if (tensor->GetElementCount() != AXIS_LENGTH) {
+        LOGE("[Concat] SetAxis failed, the axis should be a scalar.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (tensor->GetDataType() != OH_NN_INT64) {
+        LOGE("[Concat] SetAxis failed, the axis should be type OH_NN_INT64.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    void* buffer = tensor->GetBuffer();
+    if (buffer == nullptr) {
+        LOGE("[Concat] SetAxis GetBuffer return nullptr.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+    m_axis = *(static_cast<int64_t*>(buffer));
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode ConcatBuilder::Build(const std::vector<uint32_t>& paramsIndex,
+    const std::vector<uint32_t>& inputsIndex, const std::vector<uint32_t>& outputsIndex,
+    const std::vector<std::shared_ptr<NNTensor>>& allTensors)
+{
+    if (m_isBuild) {
+        LOGE("[Concat] Build failed, operation has been built, cannot build again.");
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
+
+    if (inputsIndex.size() < MINIMUM_INPUT) {
+        LOGE("[Concat] Build failed, Concat needs at least two inputs.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (outputsIndex.size() != OUTPUT_NUM) {
+        LOGE("[Concat] Build failed, the number of outputs is not equal to 1.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    OH_NN_ReturnCode returnCode = SetInputsAndOutputs(inputsIndex, outputsIndex, allTensors);
+    if (returnCode != OH_NN_SUCCESS) {
+        LOGE("[Concat] Build failed, set inputs or outputs failed.");
+        return returnCode;
+    }
+
+    for (int i : paramsIndex) {
+        std::shared_ptr<NNTensor> tensor = allTensors[i];
+        switch (tensor->GetType()) {
+            case OH_NN_CONCAT_AXIS:
+                returnCode
= SetAxis(tensor); + break; + default: + LOGE("[Concat] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Concat] Build failed, passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ConcatBuilder::SetInputsAndOutputs(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + size_t allTensorsSize = allTensors.size(); + for (auto index : inputsIndex) { + if (index >= allTensorsSize) { + LOGE("[Concat] Invalid input index, it is out of range %zu.", allTensorsSize); + return OH_NN_INVALID_PARAMETER; + } + } + + for (auto index : outputsIndex) { + if (index >= allTensorsSize) { + LOGE("[Concat] Invalid output index, it is out of range %zu.", allTensorsSize); + return OH_NN_INVALID_PARAMETER; + } + } + + m_inputsIndex.clear(); + m_inputsIndex = inputsIndex; + + m_outputsIndex.clear(); + m_outputsIndex = outputsIndex; + + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ConcatBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Concat] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Concat_CreatePrimitive(m_axis); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ConcatBuilder, OH_NN_OPS_CONCAT); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/concat_builder.h b/frameworks/native/ops/concat_builder.h new file mode 100644 index 0000000..c80a53a --- /dev/null +++ b/frameworks/native/ops/concat_builder.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_CONCAT_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_CONCAT_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ConcatBuilder : public OpsBuilder { +public: + ConcatBuilder(); + ~ConcatBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetInputsAndOutputs(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors); +private: + int64_t m_axis{0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_CONCAT_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/conv2d_builder.cpp b/frameworks/native/ops/conv2d_builder.cpp new file mode 100644 index 0000000..302f1e4 --- /dev/null +++ b/frameworks/native/ops/conv2d_builder.cpp @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "conv2d_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static constexpr int INPUT_NUM = 3; +static constexpr int OUTPUT_NUM = 1; +static constexpr int CONV2D_INPUT_WEIGHT = 1; +static constexpr int WEIGHT_SIZE = 4; +static constexpr int OUT_CHANNEL_INDEX = 0; +static constexpr int IN_CHANNEL_INDEX = 3; +static constexpr int KERNEL_HEIGHT_INDEX = 1; +static constexpr int KERNEL_WEIGHT_INDEX = 2; +static constexpr int PAD_MODE_GET = 1; +static constexpr int PAD_LIST_GET = 4; +static constexpr int SCALAR_LENGTH = 1; +static const std::string OP_NAME = "Conv2D"; + +Conv2DBuilder::Conv2DBuilder() {} + +Conv2DBuilder::~Conv2DBuilder() {} + +OH_NN_ReturnCode Conv2DBuilder::SetInputAndOutput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Conv2d] SetInputAndOutput failed, passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Conv2DBuilder::SetChannel(const std::vector& inputsIndex, + const std::vector>& allTensors) +{ + // set inChannel, outChannel, kernelSize + auto weightShape = allTensors[inputsIndex[CONV2D_INPUT_WEIGHT]]->GetDimensions(); + if (weightShape.size() != WEIGHT_SIZE) { + LOGE("[Conv2d] SetChannel failed, the dimension of weight should be %d", WEIGHT_SIZE); + return OH_NN_INVALID_PARAMETER; + } + + m_inChannel = weightShape[IN_CHANNEL_INDEX]; + m_outChannel = weightShape[OUT_CHANNEL_INDEX]; + + return OH_NN_SUCCESS; +} + +void Conv2DBuilder::SetKernelSize(const std::vector& inputsIndex, + const std::vector>& allTensors) +{ + // set inChannel, outChannel, kernelSize + auto weightShape = allTensors[inputsIndex[CONV2D_INPUT_WEIGHT]]->GetDimensions(); + + m_kernelSize.clear(); + m_kernelSize.emplace_back(weightShape[KERNEL_HEIGHT_INDEX]); + m_kernelSize.emplace_back(weightShape[KERNEL_WEIGHT_INDEX]); +} + +OH_NN_ReturnCode Conv2DBuilder::SetStrides(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set Strides + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Conv2d] SetStrides failed, the Strides should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Conv2d] SetStrides GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + const int64_t* pStrides = reinterpret_cast(buffer); + int stridesSize = tensor->GetElementCount(); + m_strides.assign(pStrides, pStrides + stridesSize); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Conv2DBuilder::SetDilation(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set Dilation + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Conv2d] SetDilation failed, the Dilation should have type OH_NN_INT64"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Conv2d] SetDilation GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + const int64_t* pDilation = reinterpret_cast(buffer); + int dilationSize = tensor->GetElementCount(); + m_dilation.assign(pDilation, pDilation + dilationSize); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Conv2DBuilder::SetPad(std::shared_ptr tensor) 
+{
+    tensor->IdentifyOpParameter();
+
+    bool isPadMode = false;
+    if (tensor->GetElementCount() == PAD_MODE_GET) {
+        isPadMode = true;
+    } else if (tensor->GetElementCount() != PAD_LIST_GET) {
+        LOGE("[Conv2d] SetPad failed, inputs should be 1 for padMode and 4 for padList.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    void* buffer = tensor->GetBuffer();
+    if (buffer == nullptr) {
+        LOGE("[Conv2d] SetPad GetBuffer return nullptr");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    // Set PadMode or PadList
+    if (isPadMode) {
+        if (tensor->GetDataType() != OH_NN_INT8) {
+            LOGE("[Conv2d] SetPad failed, the PadMode should have type OH_NN_INT8.");
+            return OH_NN_INVALID_PARAMETER;
+        }
+
+        int8_t* pPad = static_cast<int8_t*>(buffer);
+        if (!OHOS::NeuralNetworkRuntime::Validation::ValidatePadMode(*pPad)) {
+            LOGE("[Conv2d] SetPad failed, invalid pad mode.");
+            return OH_NN_INVALID_PARAMETER;
+        }
+        m_padMode = NNToMS::TransformPadModeValue(*pPad);
+    } else {
+        if (tensor->GetDataType() != OH_NN_INT64) {
+            LOGE("[Conv2d] SetPad failed, the PadList should have type OH_NN_INT64.");
+            return OH_NN_INVALID_PARAMETER;
+        }
+
+        int64_t* pPadList = static_cast<int64_t*>(buffer);
+        int padListSize = tensor->GetElementCount();
+        m_pad.assign(pPadList, pPadList + padListSize);
+    }
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode Conv2DBuilder::SetGroup(std::shared_ptr<NNTensor> tensor)
+{
+    tensor->IdentifyOpParameter();
+    // Set Group
+    if (tensor->GetElementCount() != SCALAR_LENGTH) {
+        LOGE("[Conv2d] SetGroup failed, the Group should be a scalar.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (tensor->GetDataType() != OH_NN_INT64) {
+        LOGE("[Conv2d] SetGroup failed, the Group should have type OH_NN_INT64.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    void* buffer = tensor->GetBuffer();
+    if (buffer == nullptr) {
+        LOGE("[Conv2d] SetGroup GetBuffer return nullptr");
+        return OH_NN_INVALID_PARAMETER;
+    }
+    m_group = *static_cast<int64_t*>(buffer);
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode Conv2DBuilder::SetActavitation(std::shared_ptr<NNTensor> tensor)
+{
+    tensor->IdentifyOpParameter();
+
+    if (tensor->GetElementCount() != SCALAR_LENGTH) {
+        LOGE("[Conv2d] SetActavitation failed, the ActivationType should be a scalar.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (tensor->GetDataType() != OH_NN_INT8) {
+        LOGE("[Conv2d] SetActavitation failed, the ActivationType should have type OH_NN_INT8.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    void* buffer = tensor->GetBuffer();
+    if (buffer == nullptr) {
+        LOGE("[Conv2d] SetActavitation GetBuffer return nullptr");
+        return OH_NN_INVALID_PARAMETER;
+    }
+    int8_t* pFuseData = static_cast<int8_t*>(buffer);
+    if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast<OH_NN_FuseType>(*pFuseData))) {
+        LOGE("[Conv2d] SetActavitation failed, activation input is invalid.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+    m_activationType = NNToMS::TransfromFusionType((OH_NN_FuseType)(*pFuseData));
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode Conv2DBuilder::Build(const std::vector<uint32_t>& paramsIndex,
+    const std::vector<uint32_t>& inputsIndex, const std::vector<uint32_t>& outputsIndex,
+    const std::vector<std::shared_ptr<NNTensor>>& allTensors)
+{
+    if (m_isBuild) {
+        LOGE("[Conv2d] Build failed, Conv2D operation has been built, cannot build again.");
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
+
+    OH_NN_ReturnCode returnCode = SetInputAndOutput(inputsIndex, outputsIndex, allTensors);
+    if (returnCode != OH_NN_SUCCESS) {
+        return returnCode;
+    }
+
+    returnCode = SetChannel(inputsIndex, allTensors);
+    if (returnCode != OH_NN_SUCCESS) {
+        return returnCode;
+    }
+
+    SetKernelSize(inputsIndex,
allTensors); + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_CONV2D_STRIDES: + returnCode = SetStrides(tensor); + break; + case OH_NN_CONV2D_DILATION: + returnCode = SetDilation(tensor); + break; + case OH_NN_CONV2D_PAD_MODE: + case OH_NN_CONV2D_PAD: + returnCode = SetPad(tensor); + break; + case OH_NN_CONV2D_GROUP: + returnCode = SetGroup(tensor); + break; + case OH_NN_CONV2D_ACTIVATION_TYPE: + returnCode = SetActavitation(tensor); + break; + default: + LOGE("[Conv2D] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Conv2D] Build failed, Passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr Conv2DBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Conv2d] GetPrimitive failed, Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = MindIR_Conv2DFusion_CreatePrimitive(m_kernelSize, m_strides, + m_dilation, m_padMode, m_pad, m_group, m_inChannel, m_outChannel, m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(Conv2DBuilder, OH_NN_OPS_CONV2D); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/conv2d_builder.h b/frameworks/native/ops/conv2d_builder.h new file mode 100644 index 0000000..412427e --- /dev/null +++ b/frameworks/native/ops/conv2d_builder.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_CONV2D_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_CONV2D_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class Conv2DBuilder : public OpsBuilder { +public: + Conv2DBuilder(); + ~Conv2DBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetInputAndOutput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors); + OH_NN_ReturnCode SetChannel(const std::vector& inputsIndex, + const std::vector>& allTensors); + void SetKernelSize(const std::vector& inputsIndex, + const std::vector>& allTensors); + OH_NN_ReturnCode SetStrides(std::shared_ptr tensor); + OH_NN_ReturnCode SetDilation(std::shared_ptr tensor); + OH_NN_ReturnCode SetPad(std::shared_ptr tensor); + OH_NN_ReturnCode SetGroup(std::shared_ptr tensor); + OH_NN_ReturnCode SetActavitation(std::shared_ptr tensor); + +private: + int64_t m_group{1}; + int64_t m_inChannel{0}; + int64_t m_outChannel{0}; + std::vector m_kernelSize; + std::vector m_strides; + std::vector m_pad; + std::vector m_dilation; + mindspore::lite::PadMode m_padMode{mindspore::lite::PAD_MODE_PAD}; + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_CONV2D_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/conv2d_transpose_builder.cpp b/frameworks/native/ops/conv2d_transpose_builder.cpp new file mode 100644 index 0000000..2e7b8b0 --- /dev/null +++ b/frameworks/native/ops/conv2d_transpose_builder.cpp @@ -0,0 +1,313 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "conv2d_transpose_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static constexpr int INPUT_NUM = 3; +static constexpr int OUTPUT_NUM = 1; +static constexpr int INPUT_WEIGHT = 1; +static constexpr int WEIGHT_SIZE = 4; +static constexpr int OUT_CHANNEL_INDEX = 0; +static constexpr int IN_CHANNEL_INDEX = 3; +static constexpr int KERNEL_HEIGHT_INDEX = 1; +static constexpr int KERNEL_WEIGHT_INDEX = 2; +static constexpr int PAD_MODE_PARAM_NUM = 1; +static constexpr int PAD_LIST_PARAM_NUM = 4; +static constexpr int SCALAR_LENGTH = 1; +static const std::string OP_NAME = "Conv2DTranspose"; + +Conv2DTransposeBuilder::Conv2DTransposeBuilder() {} + +Conv2DTransposeBuilder::~Conv2DTransposeBuilder() {} + +OH_NN_ReturnCode Conv2DTransposeBuilder::SetInput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Conv2dTranspose] SetInput failed, Passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // set inChannel, outChannel, kernelSize + auto weightShape = allTensors[inputsIndex[INPUT_WEIGHT]]->GetDimensions(); + if (weightShape.size() != WEIGHT_SIZE) { + LOGE("[Conv2dTranspose] SetInput failed, the dimension of weight should be %d", WEIGHT_SIZE); + return OH_NN_INVALID_PARAMETER; + } + + m_inChannel = weightShape[IN_CHANNEL_INDEX]; + m_outChannel = weightShape[OUT_CHANNEL_INDEX]; + + return OH_NN_SUCCESS; +} + +void Conv2DTransposeBuilder::SetKernelSize(const std::vector& inputsIndex, + const std::vector>& allTensors) +{ + auto weightShape = allTensors[inputsIndex[INPUT_WEIGHT]]->GetDimensions(); + + m_kernelSize.clear(); + m_kernelSize.emplace_back(weightShape[KERNEL_HEIGHT_INDEX]); + m_kernelSize.emplace_back(weightShape[KERNEL_WEIGHT_INDEX]); +} + +OH_NN_ReturnCode Conv2DTransposeBuilder::SetStrides(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set Strides + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Conv2DTranspose] SetStrides failed, the Strides should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Conv2DTranspose] SetStrides GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + const int64_t* pStrides = reinterpret_cast(buffer); + int elementSize = tensor->GetElementCount(); + m_strides.assign(pStrides, pStrides + elementSize); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Conv2DTransposeBuilder::SetDilation(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set Dilation + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Conv2DTranspose] SetDilation failed, the Dilation should be type OH_NN_INT64"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Conv2DTranspose] SetDilation GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + const int64_t* pDilation = reinterpret_cast(buffer); + int dilationSize = tensor->GetElementCount(); + m_dilation.assign(pDilation, pDilation + dilationSize); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Conv2DTransposeBuilder::SetPad(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + bool isPadMode = false; 
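+    // The pad parameter is overloaded: a one-element tensor (PAD_MODE_PARAM_NUM) is read as a
+    // padMode of type OH_NN_INT8, while a four-element tensor (PAD_LIST_PARAM_NUM) is read as a
+    // padList of type OH_NN_INT64; the element count checked below selects the interpretation.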
+ if (tensor->GetElementCount() == PAD_MODE_PARAM_NUM) { + isPadMode = true; + } else if (tensor->GetElementCount() != PAD_LIST_PARAM_NUM) { + LOGE("[Conv2DTranspose] SetPad failed, the inputs should be 1 if using padMode or 4 if using padList."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Conv2DTranspose] SetPadMode GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + // Set PadMode or PadList + if (isPadMode) { + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[Conv2DTranspose] SetPad failed, the PadMode should have type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + int8_t* pPad = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidatePadMode(*pPad)) { + LOGE("[Conv2DTranspose] SetPad failed, invalid pad mode."); + return OH_NN_INVALID_PARAMETER; + } + m_padMode = NNToMS::TransformPadModeValue(*pPad); + } else { + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Conv2DTranspose] SetPad failed, the PadList should have type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + const int64_t* pPadList = reinterpret_cast(buffer); + int padListPadSize = tensor->GetElementCount(); + m_padList.assign(pPadList, pPadList + padListPadSize); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Conv2DTransposeBuilder::SetGroup(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set Group + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[Conv2dTranspose] SetGroup failed, the Group shoule be a scalar"); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Conv2dTranspose] SetGroup failed, the Group should have type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Conv2DTranspose] SetGroup GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + m_group = *reinterpret_cast(buffer); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Conv2DTransposeBuilder::SetOutPadding(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set outputPadding + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Conv2DTranspose] SetOutPadding failed, the outputPadding should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Conv2DTranspose] SetOutPadding GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + const int64_t* pOutputPadding = reinterpret_cast(buffer); + int outputPadSize = tensor->GetElementCount(); + m_outputPaddings.assign(pOutputPadding, pOutputPadding + outputPadSize); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Conv2DTransposeBuilder::SetActivation(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[Conv2DTranspose] SetActivation failed, the ActivationType shoule be a scalar"); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[Conv2DTranspose] SetActivation failed, the ActivationType should have type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Conv2DTranspose] SetOutPadding GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + int8_t* pFuseData = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast(*pFuseData))) { + LOGE("[Conv2DTranspose] SetActivation 
failed, activation input is invalid.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+    m_activationType = NNToMS::TransfromFusionType((OH_NN_FuseType)(*pFuseData));
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode Conv2DTransposeBuilder::Build(const std::vector<uint32_t>& paramsIndex,
+    const std::vector<uint32_t>& inputsIndex,
+    const std::vector<uint32_t>& outputsIndex,
+    const std::vector<std::shared_ptr<NNTensor>>& allTensors)
+{
+    if (m_isBuild) {
+        LOGE("[Conv2DTranspose] Build failed, conv2DTranspose operation has been built, cannot build again.");
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
+
+    OH_NN_ReturnCode returnCode = SetInput(inputsIndex, outputsIndex, allTensors);
+    if (returnCode != OH_NN_SUCCESS) {
+        return returnCode;
+    }
+
+    SetKernelSize(inputsIndex, allTensors);
+
+    for (int i : paramsIndex) {
+        std::shared_ptr<NNTensor> tensor = allTensors[i]; // parameter tensor
+        switch (tensor->GetType()) {
+            case OH_NN_CONV2D_TRANSPOSE_STRIDES:
+                returnCode = SetStrides(tensor);
+                break;
+            case OH_NN_CONV2D_TRANSPOSE_DILATION:
+                returnCode = SetDilation(tensor);
+                break;
+            case OH_NN_CONV2D_TRANSPOSE_PAD_MODE:
+            case OH_NN_CONV2D_TRANSPOSE_PAD:
+                returnCode = SetPad(tensor);
+                break;
+            case OH_NN_CONV2D_TRANSPOSE_GROUP:
+                returnCode = SetGroup(tensor);
+                break;
+            case OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS:
+                returnCode = SetOutPadding(tensor);
+                break;
+            case OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE:
+                returnCode = SetActivation(tensor);
+                break;
+            default:
+                LOGE("[Conv2DTranspose] Build failed, param invalid, type = %d.", tensor->GetType());
+                return OH_NN_INVALID_PARAMETER;
+        }
+
+        if (returnCode != OH_NN_SUCCESS) {
+            LOGE("[Conv2DTranspose] Build failed, passed invalid param.");
+            return returnCode;
+        }
+    }
+
+    // The quantization type of the first output determines that of the operator.
+    SetQuantType(outputsIndex, allTensors);
+
+    m_isBuild = true;
+    m_name = OP_NAME;
+    return OH_NN_SUCCESS;
+}
+
+LiteGraphPrimitvePtr Conv2DTransposeBuilder::GetPrimitive()
+{
+    if (!m_isBuild) {
+        LOGE("[Conv2DTranspose] GetPrimitive failed, cannot get primitive before call build.");
+        return {nullptr, DestroyLiteGraphPrimitive};
+    }
+
+    void* primitive = MindIR_Conv2dTransposeFusion_CreatePrimitive(m_kernelSize,
+        m_strides, m_dilation, m_padMode, m_padList, m_group, m_inChannel, m_outChannel,
+        m_activationType, m_outputPaddings);
+    LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive);
+    return graphPrimitivePtr;
+}
+
+REGISTER_OPS(Conv2DTransposeBuilder, OH_NN_OPS_CONV2D_TRANSPOSE);
+} // namespace Ops
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/frameworks/native/ops/conv2d_transpose_builder.h b/frameworks/native/ops/conv2d_transpose_builder.h
new file mode 100644
index 0000000..f54cb50
--- /dev/null
+++ b/frameworks/native/ops/conv2d_transpose_builder.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_CONV2DTRANSPOSE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_CONV2DTRANSPOSE_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class Conv2DTransposeBuilder : public OpsBuilder { +public: + Conv2DTransposeBuilder(); + ~Conv2DTransposeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetInput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors); + void SetKernelSize(const std::vector& inputsIndex, + const std::vector>& allTensors); + OH_NN_ReturnCode SetStrides(std::shared_ptr tensor); + OH_NN_ReturnCode SetDilation(std::shared_ptr tensor); + OH_NN_ReturnCode SetPad(std::shared_ptr tensor); + OH_NN_ReturnCode SetGroup(std::shared_ptr tensor); + OH_NN_ReturnCode SetOutPadding(std::shared_ptr tensor); + OH_NN_ReturnCode SetActivation(std::shared_ptr tensor); + +private: + int64_t m_group{1}; + int64_t m_inChannel{0}; + int64_t m_outChannel{0}; + std::vector m_kernelSize; + std::vector m_strides; + std::vector m_padList; + std::vector m_dilation; + std::vector m_outputPaddings; + mindspore::lite::PadMode m_padMode{mindspore::lite::PAD_MODE_PAD}; + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_CONV2DTRANSPOSE_BUILDER_H diff --git a/frameworks/native/ops/depthwise_conv2d_native_builder.cpp b/frameworks/native/ops/depthwise_conv2d_native_builder.cpp new file mode 100644 index 0000000..51a2066 --- /dev/null +++ b/frameworks/native/ops/depthwise_conv2d_native_builder.cpp @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "depthwise_conv2d_native_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 3; +static const int OUTPUT_NUM = 1; +static const int PAD_MODE_SIZE = 1; +static const int PAD_LIST_SIZE = 4; +static const int IN_CHANNEL_IN_INPUT = 3; +static const int OUT_CHANNEL_IN_WEIGHT = 0; +static const int HEIGHT_IN_WEIGHT = 1; +static const int WIDTH_IN_WEIGHT = 2; +static const int INPUT_RANK = 4; +static const int INPUT_X = 0; +static const int INPUT_WEIGHT = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "DepthwiseConv2DNative"; + +DepthwiseConv2DNativeBuilder::DepthwiseConv2DNativeBuilder() {} + +DepthwiseConv2DNativeBuilder::~DepthwiseConv2DNativeBuilder() {} + +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetIsPadMode(std::shared_ptr tensor, + bool &isPadMode) +{ + if (tensor->GetElementCount() == PAD_MODE_SIZE) { + isPadMode = true; + } else if (tensor->GetElementCount() != PAD_LIST_SIZE) { + LOGE("[DepthwiseConv2DNative] The element size of padMode should be 1 or " + "the element size of padList should be 4."); + return OH_NN_INVALID_PARAMETER; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetActivation(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set ActivationType + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[DepthwiseConv2DNative] SetActivation failed, the Activation should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[DepthwiseConv2DNative] SetActivation failed, the activationType should have type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DepthwiseConv2DNative] SetActivation GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + int8_t* pFuseData = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast(*pFuseData))) { + LOGE("[DepthwiseConv2DNative] SetActivation failed, activation input is invalid."); + return OH_NN_INVALID_PARAMETER; + } + m_activationType = NNToMS::TransfromFusionType((OH_NN_FuseType)(*pFuseData)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetKernelSize(const std::vector& inputsIndex, + const std::vector>& allTensors) +{ + // Set kernleSize and outChannel + auto weightShape = allTensors[inputsIndex[INPUT_WEIGHT]]->GetDimensions(); + if (weightShape.size() != INPUT_RANK) { + LOGE("[DepthwiseConv2DNative] SetKernelSize failed, invalid rank of shape of weight, should be 4 dimensions."); + return OH_NN_INVALID_PARAMETER; + } + + m_outChannel = weightShape[OUT_CHANNEL_IN_WEIGHT]; + m_kernelSize.clear(); + m_kernelSize.emplace_back(weightShape[HEIGHT_IN_WEIGHT]); + m_kernelSize.emplace_back(weightShape[WIDTH_IN_WEIGHT]); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetStrides(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[DepthwiseConv2DNative] SetStrides failed, the stride should have type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DepthwiseConv2DNative] SetStrides GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + const int64_t* pStrides = reinterpret_cast(buffer); + int 
stridesSize = tensor->GetElementCount(); + m_strides.assign(pStrides, pStrides + stridesSize); + + return OH_NN_SUCCESS; +} +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetDilation(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[DepthwiseConv2DNative] SetDilation failed, the dilation should have type OH_NN_INT64"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DepthwiseConv2DNative] SetDilation GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + const int64_t* pDilation = reinterpret_cast(buffer); + int dilationSize = tensor->GetElementCount(); + m_dilation.assign(pDilation, pDilation + dilationSize); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetPadModeOrPaddings( + std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + bool isPadMode = false; + OH_NN_ReturnCode ret = SetIsPadMode(tensor, isPadMode); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DepthwiseConv2DNative] SetPad GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + if (isPadMode) { + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[DepthwiseConv2DNative] SetPadModeOrPaddings failed, the padMode should have type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + int8_t* pPad = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidatePadMode(*pPad)) { + LOGE("[DepthwiseConv2DNative] SetPadModeOrPaddings failed, invalid pad mode."); + return OH_NN_INVALID_PARAMETER; + } + m_padMode = NNToMS::TransformPadModeValue(*pPad); + } else { + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[DepthwiseConv2DNative] SetPadModeOrPaddings failed, the padList should have type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + const int64_t* pPadList = reinterpret_cast(buffer); + int padListSize = tensor->GetElementCount(); + m_pad.assign(pPadList, pPadList + padListSize); + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetInputAndOutput( + const std::vector& inputsIndex, const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[DepthwiseConv2DNative] SetInputAndOutput failed, passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[DepthwiseConv2DNative] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode ret = SetInputAndOutput(inputsIndex, outputsIndex, allTensors); + if (ret != OH_NN_SUCCESS) { + return ret; + } + + auto inputShape = allTensors[inputsIndex[INPUT_X]]->GetDimensions(); + if (inputShape.size() != INPUT_RANK) { + LOGE("[DepthwiseConv2DNative] Build failed, invalid rank of shape of input, should be 4 dimensions."); + return OH_NN_INVALID_PARAMETER; + } + m_inChannel = inputShape[IN_CHANNEL_IN_INPUT]; + // Set Kernel Size + ret = SetKernelSize(inputsIndex, allTensors); + if (ret != OH_NN_SUCCESS) { + 
LOGE("[DepthwiseConv2DNative] Build failed, SetKernelSize failed."); + return ret; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; // 参数 tensor + switch (tensor->GetType()) { + case OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES: + ret = SetStrides(tensor); + break; + case OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION: + ret = SetDilation(tensor); + break; + case OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE: + case OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD: + ret = SetPadModeOrPaddings(tensor); + break; + case OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE: + ret = SetActivation(tensor); + break; + default: + LOGE("[DepthwiseConv2DNative] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (ret != OH_NN_SUCCESS) { + LOGE("[DepthwiseConv2DNative] Build failed, passed invalid param."); + return ret; + } + } + + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr DepthwiseConv2DNativeBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[DepthwiseConv2DNative] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = MindIR_Conv2DFusion_CreatePrimitive(m_kernelSize, m_strides, + m_dilation, m_padMode, m_pad, m_inChannel, m_inChannel, m_outChannel, m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(DepthwiseConv2DNativeBuilder, OH_NN_OPS_DEPTHWISE_CONV2D_NATIVE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/depthwise_conv2d_native_builder.h b/frameworks/native/ops/depthwise_conv2d_native_builder.h new file mode 100644 index 0000000..f1663f4 --- /dev/null +++ b/frameworks/native/ops/depthwise_conv2d_native_builder.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_DEPTHWISE_CONV2D_NATIVE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_DEPTHWISE_CONV2D_NATIVE_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class DepthwiseConv2DNativeBuilder : public OpsBuilder { +public: + DepthwiseConv2DNativeBuilder(); + ~DepthwiseConv2DNativeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetInputAndOutput(const std::vector& inputsIndex, + const std::vector& outputsIndex, const std::vector>& allTensors); + OH_NN_ReturnCode SetIsPadMode(std::shared_ptr tensor, + bool &isPadMode); + OH_NN_ReturnCode SetPadModeOrPaddings(std::shared_ptr tensor); + OH_NN_ReturnCode SetKernelSize(const std::vector& inputsIndex, + const std::vector>& allTensors); + OH_NN_ReturnCode SetDilation(std::shared_ptr tensor); + OH_NN_ReturnCode SetStrides(std::shared_ptr tensor); + OH_NN_ReturnCode SetActivation(std::shared_ptr tensor); + +private: + int64_t m_inChannel{0}; + int64_t m_outChannel{0}; + std::vector m_kernelSize; + std::vector m_strides; + std::vector m_pad; + std::vector m_dilation; + mindspore::lite::PadMode m_padMode{mindspore::lite::PAD_MODE_PAD}; + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_DEPTHWISE_CONV2D_NATIVE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/div_builder.cpp b/frameworks/native/ops/div_builder.cpp new file mode 100644 index 0000000..17dd34e --- /dev/null +++ b/frameworks/native/ops/div_builder.cpp @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "div_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static constexpr int SCALAR_LENGTH = 1; +static const std::string OP_NAME = "Div"; + +DivBuilder::DivBuilder() {} + +DivBuilder::~DivBuilder() {} + +OH_NN_ReturnCode DivBuilder::SetActicationType(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[Div] SetActicationType failed, the Activation shoule be a scalar"); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[Div] SetActicationType failed, the activation should be type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Div] SetActivation GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + const int8_t* fuseData = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast(*fuseData))) { + LOGE("[Div] SetActicationType failed, fuse activation type is invalid"); + return OH_NN_INVALID_PARAMETER; + } + auto fuseType = (OH_NN_FuseType)(*fuseData); + m_activationType = NNToMS::TransfromFusionType(fuseType); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DivBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Div] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Div] Build failed, passed invalid input or output index."); + return returnCode; + } + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_DIV_ACTIVATIONTYPE: + returnCode = SetActicationType(tensor); + break; + default: + LOGE("[Div] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Div] Build failed, passed invalid param."); + return returnCode; + } + } + + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr DivBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Div] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_DivFusion_CreatePrimitive(m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(DivBuilder, OH_NN_OPS_DIV); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/div_builder.h b/frameworks/native/ops/div_builder.h new file mode 100644 index 0000000..3c0905c --- /dev/null +++ b/frameworks/native/ops/div_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_DIV_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_DIV_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class DivBuilder : public OpsBuilder { +public: + DivBuilder(); + ~DivBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetActicationType(std::shared_ptr tensor); + +private: + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_DIV_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/eltwise_builder.cpp b/frameworks/native/ops/eltwise_builder.cpp new file mode 100644 index 0000000..df6b649 --- /dev/null +++ b/frameworks/native/ops/eltwise_builder.cpp @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "eltwise_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static constexpr int SCALAR_LENGTH = 1; +static const std::string OP_NAME = "Eltwise"; + +EltwiseBuilder::EltwiseBuilder() {} + +EltwiseBuilder::~EltwiseBuilder() {} + +OH_NN_ReturnCode EltwiseBuilder::SetMode(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[Eltwise] SetMode failed, the EltwiseMode should be type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[Eltwise] SetMode failed, the eltwiseMode shoule be a scalar"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Eltwise] SetMode GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + int8_t eltwiseMode = *static_cast(buffer); + if (eltwiseMode < mindspore::lite::ELTWISE_MODE_PROD || + eltwiseMode > mindspore::lite::ELTWISE_MODE_UNKNOWN) { + LOGE("[Eltwise] SetMode failed, passed invalid eltwiseMode, received %d", eltwiseMode); + return OH_NN_INVALID_PARAMETER; + } + m_mode = (mindspore::lite::EltwiseMode)eltwiseMode; + + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode EltwiseBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Eltwise] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Eltwise] Build failed, passed invalid input index or output indices."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_ELTWISE_MODE: + returnCode = SetMode(tensor); + break; + default: + LOGE("[Eltwise] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Eltwise] Build failed, passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr EltwiseBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Eltwise] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Eltwise_CreatePrimitive(m_mode); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(EltwiseBuilder, OH_NN_OPS_ELTWISE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/eltwise_builder.h b/frameworks/native/ops/eltwise_builder.h new file mode 100644 index 0000000..09ff0d1 --- /dev/null +++ b/frameworks/native/ops/eltwise_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_ELTWISE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_ELTWISE_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class EltwiseBuilder : public OpsBuilder { +public: + EltwiseBuilder(); + ~EltwiseBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetMode(std::shared_ptr tensor); + +private: + mindspore::lite::EltwiseMode m_mode{mindspore::lite::ELTWISE_MODE_PROD}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_ELTWISE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/expandims_builder.cpp b/frameworks/native/ops/expandims_builder.cpp new file mode 100644 index 0000000..095db7b --- /dev/null +++ b/frameworks/native/ops/expandims_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "expandims_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "ExpandDims"; + +ExpandDimsBuilder::ExpandDimsBuilder() {} + +ExpandDimsBuilder::~ExpandDimsBuilder() {} + +OH_NN_ReturnCode ExpandDimsBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ExpandDims] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[ExpandDims] Build failed, the input or output index of ExpandDims operation is invalid."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + if (!paramsIndex.empty()) { + LOGE("[ExpandDims] Build failed, expandDims expects no parameters"); + return OH_NN_INVALID_PARAMETER; + } + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ExpandDimsBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ExpandDims] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_ExpandDims_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ExpandDimsBuilder, OH_NN_OPS_EXPAND_DIMS); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/expandims_builder.h b/frameworks/native/ops/expandims_builder.h new file mode 100644 index 0000000..4e16b9b --- /dev/null +++ b/frameworks/native/ops/expandims_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_EXPANDDIMS_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_EXPANDDIMS_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ExpandDimsBuilder : public OpsBuilder { +public: + ExpandDimsBuilder(); + ~ExpandDimsBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_EXPANDDIMS_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/fill_builder.cpp b/frameworks/native/ops/fill_builder.cpp new file mode 100644 index 0000000..83c085c --- /dev/null +++ b/frameworks/native/ops/fill_builder.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "fill_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Fill"; + +FillBuilder::FillBuilder() {} + +FillBuilder::~FillBuilder() {} + +OH_NN_ReturnCode FillBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Fill] Build failed, fill operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Fill] Fill Build failed, Passed invalid input or output indices."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Fill] Build failed, fill expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determinies that of the operator. 
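+ // Fill builds with no attributes: the two inputs checked above (presumably the fill value and the output shape) are all it needs, so only the output quantization type is recorded before the builder is marked as built.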
+ SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr FillBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Fill] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Fill_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(FillBuilder, OH_NN_OPS_FILL); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/fill_builder.h b/frameworks/native/ops/fill_builder.h new file mode 100644 index 0000000..8e9f224 --- /dev/null +++ b/frameworks/native/ops/fill_builder.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_FILL_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_FILL_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class FillBuilder : public OpsBuilder { +public: + FillBuilder(); + ~FillBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_FILL_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/fullconnection_builder.cpp b/frameworks/native/ops/fullconnection_builder.cpp new file mode 100644 index 0000000..ce464a3 --- /dev/null +++ b/frameworks/native/ops/fullconnection_builder.cpp @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "fullconnection_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static constexpr int INPUT_WITH_AXIS = 2; +static constexpr int INPUT_WITHOUT_AXIS = 1; +static constexpr int OUTPUT_NUM = 1; +static constexpr int SCALAR_LENGTH = 1; +static const std::string OP_NAME = "FullConnection"; + +FullConnectionBuilder::FullConnectionBuilder() {} + +FullConnectionBuilder::~FullConnectionBuilder() {} + +OH_NN_ReturnCode FullConnectionBuilder::SetFullConnectionInput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (outputsIndex.size() != OUTPUT_NUM) { + LOGE("[FullConnection] SetFullConnectionInput failed, the index of outputs don't equal to %d.", OUTPUT_NUM); + return OH_NN_INVALID_PARAMETER; + } + size_t allTensorsSize = allTensors.size(); + for (auto index : inputsIndex) { + if (index >= allTensorsSize) { + LOGE("[FullConnection] SetFullConnectionInput failed, the index of inputs is out of range."); + return OH_NN_INVALID_PARAMETER; + } + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode FullConnectionBuilder::SetFullConnectionActivation(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set Activation + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[FullConnection] SetFullConnectionActivation failed, the Activation shoule be a scalar"); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[FullConnection] SetFullConnectionActivation failed, the Activation should have type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[FullConnection] SetFullConnectionActivation GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + int8_t* pFuseData = static_cast(tensor->GetBuffer()); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast(*pFuseData))) { + LOGE("[FullConnection] SetFullConnectionActivation failed, activation input is invalid."); + return OH_NN_INVALID_PARAMETER; + } + m_activationType = NNToMS::TransfromFusionType((OH_NN_FuseType)(*pFuseData)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode FullConnectionBuilder::SetAxis(std::shared_ptr tensor) +{ + if (m_useAxis) { + tensor->IdentifyOpParameter(); + + if (tensor->GetElementCount() != SCALAR_LENGTH) { + LOGE("[FullConnection] SetFullConnectionActivation failed, the axis shoule be a scalar"); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[FullConnection] SetFullConnectionActivation failed, the Axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[FullConnection] SetAxis GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + m_axis = *static_cast(buffer); + } + return OH_NN_SUCCESS; +} + + +OH_NN_ReturnCode FullConnectionBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[FullConnection] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + bool useAxis = false; + if (paramsIndex.size() == INPUT_WITH_AXIS) { + useAxis = true; + } else if (paramsIndex.size() != 
INPUT_WITHOUT_AXIS) { + LOGE("[FullConnection] Build failed, the number of parameters should be %d if axis is used, or %d if not.", + INPUT_WITH_AXIS, INPUT_WITHOUT_AXIS); + return OH_NN_INVALID_PARAMETER; + } + + OH_NN_ReturnCode returnCode = SetFullConnectionInput(inputsIndex, outputsIndex, allTensors); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[FullConnection] Build failed, SetFullConnectionInput failed."); + return returnCode; + } + + // Set axis + m_useAxis = useAxis; + for (int i : paramsIndex) { + std::shared_ptr<NNTensor> tensor = allTensors[i]; // parameter tensor + switch (tensor->GetType()) { + case OH_NN_FULL_CONNECTION_AXIS: + returnCode = SetAxis(tensor); + break; + case OH_NN_FULL_CONNECTION_ACTIVATIONTYPE: + returnCode = SetFullConnectionActivation(tensor); + break; + default: + LOGE("[FullConnection] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (returnCode != OH_NN_SUCCESS) { + LOGE("[FullConnection] Build failed, passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determines that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr FullConnectionBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[FullConnection] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_FullConnection_CreatePrimitive(m_hasBias, m_useAxis, + m_axis, m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(FullConnectionBuilder, OH_NN_OPS_FULL_CONNECTION); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/fullconnection_builder.h b/frameworks/native/ops/fullconnection_builder.h new file mode 100644 index 0000000..50eaa38 --- /dev/null +++ b/frameworks/native/ops/fullconnection_builder.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_FULLCONNECTION_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_FULLCONNECTION_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class FullConnectionBuilder : public OpsBuilder { +public: + FullConnectionBuilder(); + ~FullConnectionBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetFullConnectionInput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors); + OH_NN_ReturnCode SetFullConnectionActivation(std::shared_ptr tensor); + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + +private: + bool m_hasBias{true}; + bool m_useAxis{false}; + int64_t m_axis{0}; + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_FULLCONNECTION_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/gather_builder.cpp b/frameworks/native/ops/gather_builder.cpp new file mode 100644 index 0000000..aba0bfc --- /dev/null +++ b/frameworks/native/ops/gather_builder.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "gather_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 3; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Gather"; + +GatherBuilder::GatherBuilder() {} + +GatherBuilder::~GatherBuilder() {} + +OH_NN_ReturnCode GatherBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Gather] Gather Build failed, gather operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Gather] Build failed, the input or output index of Gather operation is invalid."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Gather] Build failed, gather expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determinies that of the operator. 
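+ // Gather likewise takes no OH_NN parameters; its data, indices and axis are expected to arrive as the three inputs validated above, so nothing is configured here beyond the output quantization type.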
+ SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr GatherBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Gather] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Gather_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(GatherBuilder, OH_NN_OPS_GATHER); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/gather_builder.h b/frameworks/native/ops/gather_builder.h new file mode 100644 index 0000000..7fe35df --- /dev/null +++ b/frameworks/native/ops/gather_builder.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_GATHER_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_GATHER_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class GatherBuilder : public OpsBuilder { +public: + GatherBuilder(); + ~GatherBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // HETERNEURAL_NETWORK_GATHER_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/gelu_builder.cpp b/frameworks/native/ops/gelu_builder.cpp new file mode 100644 index 0000000..0f75922 --- /dev/null +++ b/frameworks/native/ops/gelu_builder.cpp @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "gelu_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUMS = 1; +static const int OUTPUT_NUMS = 1; +static const std::string OP_NAME = "Gelu"; + +GeluBuilder::GeluBuilder() {} + +GeluBuilder::~GeluBuilder() {} + +OH_NN_ReturnCode GeluBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Gelu] Build failed, operation has been build, cannot build again"); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUMS, OUTPUT_NUMS); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Gelu] Build failed, passed invalid input or output indices."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Gelu] Build failed, gelu expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr GeluBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Gelu] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_GELU; + float alpha = 0.0f; + float minVal = 0.0f; + float maxVal = 0.0f; + bool approximate = false; + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, + alpha, minVal, maxVal, approximate); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(GeluBuilder, OH_NN_OPS_GELU); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/gelu_builder.h b/frameworks/native/ops/gelu_builder.h new file mode 100644 index 0000000..6f0346a --- /dev/null +++ b/frameworks/native/ops/gelu_builder.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_GELU_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_GELU_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class GeluBuilder : public OpsBuilder { +public: + GeluBuilder(); + ~GeluBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_GELU_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/hswish_builder.cpp b/frameworks/native/ops/hswish_builder.cpp new file mode 100644 index 0000000..3a9cae5 --- /dev/null +++ b/frameworks/native/ops/hswish_builder.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "hswish_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUMS = 1; +static const int OUTPUT_NUMS = 1; +static const std::string OP_NAME = "Hswish"; + +HswishBuilder::HswishBuilder() {} + +HswishBuilder::~HswishBuilder() {} + +OH_NN_ReturnCode HswishBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Hswish] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUMS, OUTPUT_NUMS); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Hswish] Build failed, passed invalid input or output indices."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Hswish] Build failed, hswish expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determinies that of the operator. 
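+ // Hswish is parameter-free; the activation kind is fixed to ACTIVATION_TYPE_HSWISH later in GetPrimitive(), so Build() only records the indices and the output quantization type.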
+ SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr HswishBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Hswish] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_HSWISH; + float alpha = 0.0f; + float minVal = 0.0f; + float maxVal = 0.0f; + bool approximate = false; + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, + alpha, minVal, maxVal, approximate); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + + return graphPrimitivePtr; +} + +REGISTER_OPS(HswishBuilder, OH_NN_OPS_HSWISH); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/hswish_builder.h b/frameworks/native/ops/hswish_builder.h new file mode 100644 index 0000000..0d9a905 --- /dev/null +++ b/frameworks/native/ops/hswish_builder.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_HSWISH_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_HSWISH_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class HswishBuilder : public OpsBuilder { +public: + HswishBuilder(); + ~HswishBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_HSWISH_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/layernorm_builder.cpp b/frameworks/native/ops/layernorm_builder.cpp new file mode 100644 index 0000000..a8938ab --- /dev/null +++ b/frameworks/native/ops/layernorm_builder.cpp @@ -0,0 +1,206 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "layernorm_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 3; +static const int OUTPUT_NUM = 1; +static const int INPUT_X = 0; +static const int INPUT_GAMMA = 1; +static const int INPUT_BETA = 2; +static const std::string OP_NAME = "LayerNorm"; + +LayerNormBuilder::LayerNormBuilder() {} + +LayerNormBuilder::~LayerNormBuilder() {} + +OH_NN_ReturnCode LayerNormBuilder::SetBeginNormAxis(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT32) { + LOGE("[LayerNormBuilder] SetBeginNormAxis failed. The has_bias should be type OH_NN_INT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (!tensor->IsScalar()) { + LOGE("[LayerNormBuilder] SetBeginNormAxis failed. The beginNormAxis should be a scalar value."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[LayerNormBuilder] SetBeginNormAxis failed, the beginNormAxis passed a empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + m_beginNormAxis = *static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode LayerNormBuilder::SetEpsilon(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[LayerNormBuilder] SetEpsilon failed. The epsilon should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (!tensor->IsScalar()) { + LOGE("[LayerNormBuilder] SetEpsilon failed. The epsilon should be a scalar value."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[LayerNormBuilder] SetEpsilon failed, the epsilon passed a empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + m_epsilon = *static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode LayerNormBuilder::SetBeginParamsAxis(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT32) { + LOGE("[LayerNormBuilder] SetBeginParamsAxis failed. The has_bias should be type OH_NN_INT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (!tensor->IsScalar()) { + LOGE("[LayerNormBuilder] SetBeginParamsAxis failed. The beginNormAxis should be a scalar value."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[LayerNormBuilder] SetBeginParamsAxis failed, the beginParamsAxis passed a empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + m_beginParamsAxis = *static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode LayerNormBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[LayerNormBuilder] Build failed. LayerNorm operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[LayerNormBuilder] Build failed. 
Passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr<NNTensor> tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_LAYER_NORM_BEGIN_NORM_AXIS: + returnCode = SetBeginNormAxis(tensor); + break; + case OH_NN_LAYER_NORM_EPSILON: + returnCode = SetEpsilon(tensor); + break; + case OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS: + returnCode = SetBeginParamsAxis(tensor); + break; + default: + LOGE("[LayerNormBuilder] Parameter type is invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[LayerNormBuilder] Build failed. Passed invalid param."); + return returnCode; + } + } + + auto inputShape = allTensors[inputsIndex[INPUT_X]]->GetDimensions(); + int inputShapeSize = static_cast<int>(inputShape.size()); + // beginNormAxis must be in [1, rank(input)), because the normalized shape cannot cover the whole input shape. + if (m_beginNormAxis >= inputShapeSize || m_beginNormAxis < 1) { + LOGE("[LayerNormBuilder] Build failed, invalid beginNormAxis value, it should be [1, rank(input))."); + return OH_NN_INVALID_PARAMETER; + } + // Validate gamma and beta shape. + returnCode = ValidateGammaAndBetaShape(inputsIndex, m_beginNormAxis, allTensors); + if (returnCode != OH_NN_SUCCESS) { + return returnCode; + } + + // The quantization type of the first output determines that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr LayerNormBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[LayerNormBuilder] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_LayerNormFusion_CreatePrimitive(m_beginNormAxis, + m_epsilon, m_elementwiseAffine, m_beginParamsAxis); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +OH_NN_ReturnCode LayerNormBuilder::ValidateGammaAndBetaShape(const std::vector<uint32_t>& inputsIndex, + int beginAxis, const std::vector<std::shared_ptr<NNTensor>>& allTensors) const +{ + auto inputShape = allTensors[inputsIndex[INPUT_X]]->GetDimensions(); + auto gammaShape = allTensors[inputsIndex[INPUT_GAMMA]]->GetDimensions(); + auto betaShape = allTensors[inputsIndex[INPUT_BETA]]->GetDimensions(); + int inputShapeSize = static_cast<int>(inputShape.size()); + + for (auto i = beginAxis; i < inputShapeSize; i++) { + if (gammaShape[i - beginAxis] != inputShape[i]) { + LOGE("[LayerNormBuilder] Invalid gamma shape, gamma shape should be equal to the normalized shape."); + return OH_NN_INVALID_PARAMETER; + } + if (betaShape[i - beginAxis] != inputShape[i]) { + LOGE("[LayerNormBuilder] Invalid beta shape, beta shape should be equal to the normalized shape."); + return OH_NN_INVALID_PARAMETER; + } + } + + return OH_NN_SUCCESS; +} + +REGISTER_OPS(LayerNormBuilder, OH_NN_OPS_LAYER_NORM); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/layernorm_builder.h b/frameworks/native/ops/layernorm_builder.h new file mode 100644 index 0000000..68847b7 --- /dev/null +++ b/frameworks/native/ops/layernorm_builder.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_LAYERNORM_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_LAYERNORM_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class LayerNormBuilder : public OpsBuilder { +public: + LayerNormBuilder(); + ~LayerNormBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetBeginNormAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetEpsilon(std::shared_ptr tensor); + OH_NN_ReturnCode SetBeginParamsAxis(std::shared_ptr tensor); + OH_NN_ReturnCode ValidateGammaAndBetaShape(const std::vector& inputsIndex, + int beginAxis, const std::vector>& allTensors) const; + +private: + int m_beginNormAxis{1}; + float m_epsilon{1e-7}; + bool m_elementwiseAffine{false}; + int m_beginParamsAxis{1}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_LAYERNORM_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/lessequal_builder.cpp b/frameworks/native/ops/lessequal_builder.cpp new file mode 100644 index 0000000..a74445e --- /dev/null +++ b/frameworks/native/ops/lessequal_builder.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "lessequal_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUMS = 2; +static const int OUTPUT_NUMS = 1; +static const std::string OP_NAME = "LessEqual"; + +LessEqualBuilder::LessEqualBuilder() {} + +LessEqualBuilder::~LessEqualBuilder() {} + +OH_NN_ReturnCode LessEqualBuilder::Build(const std::vector<uint32_t>& paramsIndex, + const std::vector<uint32_t>& inputsIndex, + const std::vector<uint32_t>& outputsIndex, + const std::vector<std::shared_ptr<NNTensor>>& allTensors) +{ + if (m_isBuild) { + LOGE("[LessEqual] Build failed, operation has been built, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUMS, OUTPUT_NUMS); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[LessEqual] Build failed, passed invalid input or output indices."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[LessEqual] LessEqual expects no parameters, but received %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determines that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr LessEqualBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[LessEqual] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_LessEqual_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(LessEqualBuilder, OH_NN_OPS_LESS_EQUAL); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/lessequal_builder.h b/frameworks/native/ops/lessequal_builder.h new file mode 100644 index 0000000..6933ffe --- /dev/null +++ b/frameworks/native/ops/lessequal_builder.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_LESSEQUAL_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_LESSEQUAL_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class LessEqualBuilder : public OpsBuilder { +public: + LessEqualBuilder(); + ~LessEqualBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_LESSEQUAL_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/matmul_builder.cpp b/frameworks/native/ops/matmul_builder.cpp new file mode 100644 index 0000000..9b0440f --- /dev/null +++ b/frameworks/native/ops/matmul_builder.cpp @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "matmul_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "Matmul"; + +MatmulBuilder::MatmulBuilder() {} + +MatmulBuilder::~MatmulBuilder() {} + +OH_NN_ReturnCode MatmulBuilder::SetTransposeA(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[Matmul] Matmul SetTransposeA failed. The transposeA should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[Matmul] Matmul SetTransposeA failed. The transposeA should have type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Matmul] SetTransposeA failed, the transposeA passed a empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + m_transposeA = *static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode MatmulBuilder::SetTransposeB(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[Matmul] Matmul SetTransposeB failed. The transposeB should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[Matmul] Matmul SetTransposeB failed. 
The transposeB should have type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Matmul] SetTransposeB failed, the transposeB passed an empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + m_transposeB = *static_cast<bool*>(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode MatmulBuilder::SetActivationType(std::shared_ptr<NNTensor> tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[Matmul] Matmul SetActivationType failed. The activation should be a scalar."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[Matmul] Matmul SetActivationType failed. The activation should be type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Matmul] SetActivationType failed, the activationType passed an empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + int8_t* fuseData = static_cast<int8_t*>(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast<OH_NN_FuseType>(*fuseData))) { + LOGE("[Matmul] Matmul SetActivationType failed. Fuse activation type is invalid."); + return OH_NN_INVALID_PARAMETER; + } + + auto fuseType = (OH_NN_FuseType)(*fuseData); + m_activationType = NNToMS::TransfromFusionType(fuseType); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode MatmulBuilder::Build(const std::vector<uint32_t>& paramsIndex, + const std::vector<uint32_t>& inputsIndex, + const std::vector<uint32_t>& outputsIndex, + const std::vector<std::shared_ptr<NNTensor>>& allTensors) +{ + if (m_isBuild) { + LOGE("[Matmul] Matmul Build failed. Operation has been built, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Matmul] Matmul Build failed. Passed invalid input or output indices."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr<NNTensor> tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_MATMUL_TRANSPOSE_A: + returnCode = SetTransposeA(tensor); + break; + case OH_NN_MATMUL_TRANSPOSE_B: + returnCode = SetTransposeB(tensor); + break; + case OH_NN_MATMUL_ACTIVATION_TYPE: + returnCode = SetActivationType(tensor); + break; + default: + LOGE("[Matmul] Parameter type is invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Matmul] Matmul Build failed. Passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determines that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr MatmulBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Matmul] Matmul GetPrimitive failed.
Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_MatMulFusion_CreatePrimitive(m_transposeA, m_transposeB, m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(MatmulBuilder, OH_NN_OPS_MATMUL); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/matmul_builder.h b/frameworks/native/ops/matmul_builder.h new file mode 100644 index 0000000..1efcdf1 --- /dev/null +++ b/frameworks/native/ops/matmul_builder.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_MATMUL_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_MATMUL_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class MatmulBuilder : public OpsBuilder { +public: + MatmulBuilder(); + ~MatmulBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetTransposeA(std::shared_ptr tensor); + OH_NN_ReturnCode SetTransposeB(std::shared_ptr tensor); + OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + +private: + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + bool m_transposeA{false}; + bool m_transposeB{false}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_MATMUL_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/maximum_builder.cpp b/frameworks/native/ops/maximum_builder.cpp new file mode 100644 index 0000000..b681ec1 --- /dev/null +++ b/frameworks/native/ops/maximum_builder.cpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "maximum_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Maximum"; + +MaximumBuilder::MaximumBuilder() {} + +MaximumBuilder::~MaximumBuilder() {} + +OH_NN_ReturnCode MaximumBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Maximum] Maximum Build failed. operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Maximum] Maximum Build failed. The input or output index of Maximum operation is invalid."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[Maximum] Maximum Build failed. Maximum expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr MaximumBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Maximum] Maximum GetPrimitive failed. Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Maximum_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(MaximumBuilder, OH_NN_OPS_MAXIMUM); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/maximum_builder.h b/frameworks/native/ops/maximum_builder.h new file mode 100644 index 0000000..5308c5d --- /dev/null +++ b/frameworks/native/ops/maximum_builder.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_MAXIMUM_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_MAXIMUM_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class MaximumBuilder : public OpsBuilder { +public: + MaximumBuilder(); + ~MaximumBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} + +#endif // NEURAL_NETWORK_RUNTIME_MAXIMUM_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/maxpool_builder.cpp b/frameworks/native/ops/maxpool_builder.cpp new file mode 100644 index 0000000..04b6167 --- /dev/null +++ b/frameworks/native/ops/maxpool_builder.cpp @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "maxpool_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const std::string OP_NAME = "MaxPool"; + +MaxPoolBuilder::MaxPoolBuilder() {} + +MaxPoolBuilder::~MaxPoolBuilder() {} + +OH_NN_ReturnCode MaxPoolBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + OH_NN_ReturnCode returnCode = PoolingBuild(paramsIndex, inputsIndex, outputsIndex, allTensors); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[MaxPool] Build failed, PoolingBuild failed."); + return returnCode; + } + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr MaxPoolBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[MaxPool] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = MindIR_MaxPoolFusion_CreatePrimitive(m_kernelSize, m_strides, m_pad, + m_padMode, m_format, m_global, m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(MaxPoolBuilder, OH_NN_OPS_MAX_POOL); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/maxpool_builder.h b/frameworks/native/ops/maxpool_builder.h new file mode 100644 index 0000000..2022b6e --- /dev/null +++ b/frameworks/native/ops/maxpool_builder.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_MAXPOOL_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_MAXPOOL_BUILDER_H + +#include "pooling_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class MaxPoolBuilder : public PoolingBuilder { +public: + MaxPoolBuilder(); + ~MaxPoolBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_MAXPOOL_BUILDER_H diff --git a/frameworks/native/ops/mul_builder.cpp b/frameworks/native/ops/mul_builder.cpp new file mode 100644 index 0000000..f055d28 --- /dev/null +++ b/frameworks/native/ops/mul_builder.cpp @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mul_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "Mul"; + +MulBuilder::MulBuilder() {} + +MulBuilder::~MulBuilder() {} + +OH_NN_ReturnCode MulBuilder::SetActivationType(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[Mul] Mul SetActivationType failed. The shape of activation should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[Mul] Mul SetActivationType failed. The activation should be type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Mul] SetActivationType failed, the activationType passed a empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + int8_t* fuseData = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast(*fuseData))) { + LOGE("[Mul] Mul SetActivationType failed. Fuse activation type is invalid"); + return OH_NN_INVALID_PARAMETER; + } + + auto fuseType = (OH_NN_FuseType)(*fuseData); + m_activationType = NNToMS::TransfromFusionType(fuseType); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode MulBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Mul] Mul build failed. 
operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Mul] Mul build failed. Passed invalid input or output index of Mul operation index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_MUL_ACTIVATION_TYPE: + returnCode = SetActivationType(tensor); + break; + default: + LOGE("[Mul] Parameter Type is invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Mul] Mul build failed. Passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr MulBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Mul] Mul GetPrimitive failed. Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_MulFusion_CreatePrimitive(m_activationType); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(MulBuilder, OH_NN_OPS_MUL); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/mul_builder.h b/frameworks/native/ops/mul_builder.h new file mode 100644 index 0000000..25cd5ac --- /dev/null +++ b/frameworks/native/ops/mul_builder.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
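
MulBuilder::SetActivationType above repeats, almost verbatim, the activation handling already seen in MatmulBuilder: the parameter must be a scalar OH_NN_INT8 tensor carrying a valid OH_NN_FuseType, which is then mapped to a mindspore::lite::ActivationType through NNToMS::TransfromFusionType. A sketch of that shared check, factored into a hypothetical helper (not part of the patch; it assumes the caller has already run tensor->IdentifyOpParameter(), as the Build() methods do):

// Hypothetical helper mirroring MatmulBuilder/MulBuilder::SetActivationType.
static OH_NN_ReturnCode ReadFuseType(const std::shared_ptr<OHOS::NeuralNetworkRuntime::NNTensor>& tensor,
                                     OH_NN_FuseType& fuseType)
{
    // The activation parameter is a single OH_NN_INT8 value.
    if (tensor->GetElementCount() != 1 || tensor->GetDataType() != OH_NN_INT8) {
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        return OH_NN_INVALID_PARAMETER;
    }

    int8_t value = *static_cast<int8_t*>(buffer);
    // Only fusion types known to the runtime are accepted.
    if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast<OH_NN_FuseType>(value))) {
        return OH_NN_INVALID_PARAMETER;
    }

    fuseType = static_cast<OH_NN_FuseType>(value);
    return OH_NN_SUCCESS;
}

The value obtained this way is what ends up in m_activationType and, through GetPrimitive(), in MindIR_MulFusion_CreatePrimitive.
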
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_MUL_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_MUL_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class MulBuilder : public OpsBuilder { +public: + MulBuilder(); + ~MulBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + +private: + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} + +#endif // NEURAL_NETWORK_RUNTIME_MUL_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/onehot_builder.cpp b/frameworks/native/ops/onehot_builder.cpp new file mode 100644 index 0000000..b068408 --- /dev/null +++ b/frameworks/native/ops/onehot_builder.cpp @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "onehot_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 4; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Onehot"; + +OnehotBuilder::OnehotBuilder() {} + +OnehotBuilder::~OnehotBuilder() {} + +OH_NN_ReturnCode OnehotBuilder::SetAxis(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Onehot] Onehot SetAxis failed. The axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Onehot] SetAxis failed, the axis passed a empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + m_axis = *static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode OnehotBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Onehot] Onehot build failed. operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Onehot] Onehot build failed. 
Passed invalid input or output index of Onehot operation index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_ONE_HOT_AXIS: + returnCode = SetAxis(tensor); + break; + default: + LOGE("[Onehot] Parameter Type is invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Onehot] Onehot Build failed. Passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr OnehotBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Onehot] Onehot GetPrimitive failed. Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_OneHot_CreatePrimitive(m_axis); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(OnehotBuilder, OH_NN_OPS_ONE_HOT); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/ops/onehot_builder.h b/frameworks/native/ops/onehot_builder.h new file mode 100644 index 0000000..537855b --- /dev/null +++ b/frameworks/native/ops/onehot_builder.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_ONEHOT_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_ONEHOT_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class OnehotBuilder : public OpsBuilder { +public: + OnehotBuilder(); + ~OnehotBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + +private: + int64_t m_axis{-1}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // OHOS + +#endif // NEURAL_NETWORK_RUNTIME_ONEHOT_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/pad_builder.cpp b/frameworks/native/ops/pad_builder.cpp new file mode 100644 index 0000000..b14d8a6 --- /dev/null +++ b/frameworks/native/ops/pad_builder.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "pad_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "Pad"; + +PadBuilder::PadBuilder() {} + +PadBuilder::~PadBuilder() {} + +OH_NN_ReturnCode PadBuilder::SetConstantValue(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[Pad] Pad SetConstantValue failed. The constant_value should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[Pad] Pad SetConstantValue failed. The constant_value should be type OH_NN_FLOAT32"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Pad] SetConstantValue failed, the constantValue passed an empty buffer."); + return OH_NN_INVALID_PARAMETER; + } + + m_constantValue = *static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode PadBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Pad] Pad Build failed. operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Pad] Pad Build failed. Passed invalid input or output index of Pad operation index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_PAD_CONSTANT_VALUE: + returnCode = SetConstantValue(tensor); + break; + default: + LOGE("[Pad] Parameter Type is invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Pad] Pad Build failed. Passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} +LiteGraphPrimitvePtr PadBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Pad] GetPrimitive failed. Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + mindspore::lite::PaddingMode padding_mode = mindspore::lite::PADDING_MODE_CONSTANT; + void* primitive = MindIR_PadFusion_CreatePrimitive(paddings, padding_mode, m_constantValue); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(PadBuilder, OH_NN_OPS_PAD); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespcae OHOS \ No newline at end of file diff --git a/frameworks/native/ops/pad_builder.h b/frameworks/native/ops/pad_builder.h new file mode 100644 index 0000000..a5968a4 --- /dev/null +++ b/frameworks/native/ops/pad_builder.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_PAD_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_PAD_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class PadBuilder : public OpsBuilder { +public: + PadBuilder(); + ~PadBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetConstantValue(std::shared_ptr tensor); + +private: + std::vector> paddings{}; + float m_constantValue{0.0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_PAD_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/pooling_builder.cpp b/frameworks/native/ops/pooling_builder.cpp new file mode 100644 index 0000000..9b52c8f --- /dev/null +++ b/frameworks/native/ops/pooling_builder.cpp @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
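
// A note on the PadBuilder just added: Build() only consumes the
// OH_NN_PAD_CONSTANT_VALUE parameter (a scalar OH_NN_FLOAT32), while the
// `paddings` member declared in pad_builder.h is never populated and is passed
// to MindIR_PadFusion_CreatePrimitive as an empty default. The actual padding
// sizes are presumably carried by the operation's second input tensor (hence
// INPUT_NUM = 2), so `paddings` serves only as a placeholder here.
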
+ */ + +#include "pooling_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const int NUM_ELEMENT_PAD_MODE = 1; +static const int NUM_ELEMENT_PAD_LIST = 4; +static const int ACTIVATION_LENGTH = 1; + +OH_NN_ReturnCode PoolingBuilder::PoolingBuild(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[PoolingBuilder] PoolingBuild failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + // Set input and output + OH_NN_ReturnCode returnCode = SetInputAndOutput(inputsIndex, outputsIndex, allTensors); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[PoolingBuilder] PoolingBuild failed, the SetInputAndOutput failed."); + return returnCode; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_AVG_POOL_KERNEL_SIZE: + case OH_NN_MAX_POOL_KERNEL_SIZE: + returnCode = SetKernel(tensor); + break; + case OH_NN_AVG_POOL_STRIDE: + case OH_NN_MAX_POOL_STRIDE: + returnCode = SetStrides(tensor); + break; + case OH_NN_AVG_POOL_PAD_MODE: + case OH_NN_MAX_POOL_PAD_MODE: + case OH_NN_MAX_POOL_PAD: + case OH_NN_AVG_POOL_PAD: + returnCode = SetPadModeOrPaddings(tensor); + break; + case OH_NN_AVG_POOL_ACTIVATION_TYPE: + case OH_NN_MAX_POOL_ACTIVATION_TYPE: + returnCode = SetActivation(tensor); + break; + default: + LOGE("[PoolingBuilder] Build failed, param invalid, type = %d.", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + if (returnCode != OH_NN_SUCCESS) { + LOGE("[PoolingBuilder] PoolingBuild failed, passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. 
+ SetQuantType(outputsIndex, allTensors); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode PoolingBuilder::SetInputAndOutput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[PoolingBuilder] SetInputAndOutput failed, passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode PoolingBuilder::SetKernel(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set kernelSize + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[PoolingBuilder] SetKernel failed, the KernelSize should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[PoolingBuilder] SetKernel GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + const int64_t* pKernelSize = reinterpret_cast(buffer); + int kernelSize = tensor->GetElementCount(); + m_kernelSize.assign(pKernelSize, pKernelSize + kernelSize); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode PoolingBuilder::SetStrides(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + // Set Strides + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[PoolingBuilder] SetStrides failed, the Strides should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[PoolingBuilder] SetStrides GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + const int64_t* pStrides = reinterpret_cast(buffer); + int strideslSize = tensor->GetElementCount(); + m_strides.assign(pStrides, pStrides + strideslSize); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode PoolingBuilder::SetPadModeOrPaddings(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[PoolingBuilder] SetPadModeOrPaddings GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + size_t tensorElementCount = tensor->GetElementCount(); + // Set PadMode or PadList + if (tensorElementCount == NUM_ELEMENT_PAD_MODE) { + // PadMode + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[PoolingBuilder] SetPadModeOrPaddings failed, the type of padMode should be OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + int8_t* pPadMode = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidatePadMode(*pPadMode)) { + LOGE("[PoolingBuilder] SetPadModeOrPaddings failed, invalid pad mode."); + return OH_NN_INVALID_PARAMETER; + } + m_padMode = NNToMS::TransformPadModeValue(*pPadMode); + } else if (tensorElementCount == NUM_ELEMENT_PAD_LIST) { + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[PoolingBuilder] SetPadModeOrPaddings failed, the type of padList should be OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + int64_t* pPad = static_cast(buffer); + // PadList + m_pad.clear(); + for (int i = 0; i < NUM_ELEMENT_PAD_LIST; i++) { + m_pad.emplace_back(static_cast(pPad[i])); + } + } else { + LOGE("[PoolingBuilder] SetPadModeOrPaddings failed, invalid element size of padMode or padList," + "padMode should be single value, and padList should be 4."); + return OH_NN_INVALID_PARAMETER; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode PoolingBuilder::SetActivation(std::shared_ptr tensor) +{ + 
tensor->IdentifyOpParameter(); + // Set ActivationType + if (tensor->GetElementCount() != ACTIVATION_LENGTH) { + LOGE("[PoolingBuilder] SetActivation failed, the Activation should be a scalar"); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[PoolingBuilder] SetActivation failed, the ActivationType should be type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[PoolingBuilder] SetActivation GetBuffer return nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + int8_t* pFuseData = static_cast<int8_t*>(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast<OH_NN_FuseType>(*pFuseData))) { + LOGE("[PoolingBuilder] SetActivation failed, activation input is invalid."); + return OH_NN_INVALID_PARAMETER; + } + auto fuseType = (OH_NN_FuseType)(*pFuseData); + m_activationType = NNToMS::TransfromFusionType(fuseType); + + return OH_NN_SUCCESS; +} +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/pooling_builder.h b/frameworks/native/ops/pooling_builder.h new file mode 100644 index 0000000..4a42b4a --- /dev/null +++ b/frameworks/native/ops/pooling_builder.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
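
PoolingBuilder::SetPadModeOrPaddings above accepts two encodings in the same parameter slot and tells them apart by element count: a single OH_NN_INT8 value is treated as a pad mode (validated with Validation::ValidatePadMode and converted via NNToMS::TransformPadModeValue into m_padMode), while four OH_NN_INT64 values are copied into m_pad as explicit paddings. A compact restatement of the accepted shapes, as a hypothetical helper that reuses the constants and NNTensor accessors from pooling_builder.cpp (illustrative only, not part of the patch):

// 1 element,  OH_NN_INT8  -> pad mode
// 4 elements, OH_NN_INT64 -> explicit padding list
static bool IsValidPoolingPadParam(const std::shared_ptr<OHOS::NeuralNetworkRuntime::NNTensor>& tensor)
{
    size_t count = tensor->GetElementCount();
    if (count == NUM_ELEMENT_PAD_MODE) {
        return tensor->GetDataType() == OH_NN_INT8;
    }
    if (count == NUM_ELEMENT_PAD_LIST) {
        return tensor->GetDataType() == OH_NN_INT64;
    }
    return false;   // any other shape is rejected with OH_NN_INVALID_PARAMETER
}
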
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_POOLING_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_POOLING_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class PoolingBuilder : public OpsBuilder { +public: + PoolingBuilder() = default; + virtual ~PoolingBuilder() = default; + + OH_NN_ReturnCode PoolingBuild(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector& paramsIndex, + const std::vector>& allTensors); + + OH_NN_ReturnCode SetInputAndOutput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors); + + OH_NN_ReturnCode SetKernel(std::shared_ptr tensor); + OH_NN_ReturnCode SetStrides(std::shared_ptr tensor); + OH_NN_ReturnCode SetPadModeOrPaddings(std::shared_ptr tensor); + OH_NN_ReturnCode SetActivation(std::shared_ptr tensor); + +protected: + std::vector m_kernelSize; + std::vector m_pad; + std::vector m_strides; + mindspore::lite::PadMode m_padMode{mindspore::lite::PAD_MODE_PAD}; + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + mindspore::lite::RoundMode m_roundMode = mindspore::lite::ROUND_MODE_FLOOR; + mindspore::lite::Format m_format = mindspore::lite::FORMAT_NCHW; + bool m_global = false; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_POOLING_BUILDER_H diff --git a/frameworks/native/ops/pow_builder.cpp b/frameworks/native/ops/pow_builder.cpp new file mode 100644 index 0000000..71e9df7 --- /dev/null +++ b/frameworks/native/ops/pow_builder.cpp @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "pow_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Pow"; + +PowBuilder::PowBuilder() {} + +PowBuilder::~PowBuilder() {} + +OH_NN_ReturnCode PowBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Pow] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Pow] Build failed, passed invalid input or output index of Pow operation index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Pow] Build failed, pow expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr PowBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Pow] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + float scale{1.0}; + float shift{0.0}; + + void* primitive = mindspore::lite::MindIR_PowFusion_CreatePrimitive(scale, shift); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(PowBuilder, OH_NN_OPS_POW); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/pow_builder.h b/frameworks/native/ops/pow_builder.h new file mode 100644 index 0000000..10ec727 --- /dev/null +++ b/frameworks/native/ops/pow_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_POW_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_POW_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class PowBuilder : public OpsBuilder { +public: + PowBuilder(); + ~PowBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_POW_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/prelu_builder.cpp b/frameworks/native/ops/prelu_builder.cpp new file mode 100644 index 0000000..d8fcc56 --- /dev/null +++ b/frameworks/native/ops/prelu_builder.cpp @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "prelu_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUMS = 2; +static const int OUTPUT_NUMS = 1; +static const std::string OP_NAME = "PRelu"; + +PReluBuilder::PReluBuilder() {} + +PReluBuilder::~PReluBuilder() {} + +OH_NN_ReturnCode PReluBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[PRelu] Build failed, the PRelu operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUMS, OUTPUT_NUMS); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[PRelu] Build failed, passed invalid input or output index of PRelu operation index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[PRelu] Build failed, the PRelu expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr PReluBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[PRelu] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + bool channelShared{false}; + void* primitive = mindspore::lite::MindIR_PReLUFusion_CreatePrimitive(channelShared); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(PReluBuilder, OH_NN_OPS_PRELU); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/prelu_builder.h b/frameworks/native/ops/prelu_builder.h new file mode 100644 index 0000000..117343c --- /dev/null +++ b/frameworks/native/ops/prelu_builder.h @@ -0,0 +1,39 @@ 
+/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_PRELU_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_PRELU_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class PReluBuilder : public OpsBuilder { +public: + PReluBuilder(); + ~PReluBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_PRELU_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/quant_dtype_cast_builder.cpp b/frameworks/native/ops/quant_dtype_cast_builder.cpp new file mode 100644 index 0000000..79ce59c --- /dev/null +++ b/frameworks/native/ops/quant_dtype_cast_builder.cpp @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "quant_dtype_cast_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "QuantDTypeCast"; + +QuantDTypeCastBuilder::QuantDTypeCastBuilder() {} + +QuantDTypeCastBuilder::~QuantDTypeCastBuilder() {} + +OH_NN_ReturnCode QuantDTypeCastBuilder::SetSrcT(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[QuantDTypeCast] SetSrcT failed, the src_t should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[QuantDTypeCast] SetSrcT failed, the src_t passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_src_t = static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode QuantDTypeCastBuilder::SetDstT(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[QuantDTypeCast] SetDstT failed, the dst_t should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[QuantDTypeCast] SetDstT failed, the dst_t passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_dst_t = static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode QuantDTypeCastBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[QuantDTypeCast] Build failed, the QuantDTypeCast operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[QuantDTypeCast] Build failed, passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (uint32_t i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_QUANT_DTYPE_CAST_SRC_T: + returnCode = SetSrcT(tensor); + break; + case OH_NN_QUANT_DTYPE_CAST_DST_T: + returnCode = SetDstT(tensor); + break; + default: + LOGE("[QuantDTypeCast] Build failed, parameter type is invalid. 
type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[QuantDTypeCast] Build failed, passed invalid param."); + return returnCode; + } + } + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr QuantDTypeCastBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[QuantDTypeCast] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_QuantDTypeCast_CreatePrimitive(*m_src_t, *m_dst_t); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(QuantDTypeCastBuilder, OH_NN_OPS_QUANT_DTYPE_CAST); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/quant_dtype_cast_builder.h b/frameworks/native/ops/quant_dtype_cast_builder.h new file mode 100644 index 0000000..bd4a1b3 --- /dev/null +++ b/frameworks/native/ops/quant_dtype_cast_builder.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_QUANTDTYPECAST_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_QUANTDTYPECAST_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class QuantDTypeCastBuilder : public OpsBuilder { +public: + QuantDTypeCastBuilder(); + ~QuantDTypeCastBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetSrcT(std::shared_ptr tensor); + OH_NN_ReturnCode SetDstT(std::shared_ptr tensor); + +private: + const uint64_t* m_src_t{nullptr}; + const uint64_t* m_dst_t{nullptr}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_QUANTDTYPECAST_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/reduceall_builder.cpp b/frameworks/native/ops/reduceall_builder.cpp new file mode 100644 index 0000000..fbf0406 --- /dev/null +++ b/frameworks/native/ops/reduceall_builder.cpp @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
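
// A note on the QuantDTypeCastBuilder just added: unlike the other builders,
// which copy scalar parameters by value (for example m_axis in OnehotBuilder),
// SetSrcT() and SetDstT() store raw pointers into the parameter tensors'
// buffers (m_src_t and m_dst_t are const uint64_t*), and GetPrimitive()
// dereferences them later. The parameter tensors are therefore assumed to stay
// alive, with valid buffers, between Build() and GetPrimitive(). Note also that
// the buffers are validated as OH_NN_INT64 but read through const uint64_t*.
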
+ */ + +#include "reduceall_builder.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "ReduceAll"; + +ReduceAllBuilder::ReduceAllBuilder() {} + +ReduceAllBuilder:: ~ReduceAllBuilder() {} + +OH_NN_ReturnCode ReduceAllBuilder::SetKeepDims(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceAll] SetKeepDims failed, the keep_dims dimensions should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ReduceAll] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceAll] SetKeepDims failed, the keep_dims passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_keepDims = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceAllBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ReduceAll] Build failed, the ReduceAll operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceAll] Build failed, passed invalid input or output index of ReduceAll operation index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (uint32_t i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_REDUCE_ALL_KEEP_DIMS: + returnCode = SetKeepDims(tensor); + break; + default: + LOGE("[ReduceAll] Build failed, parameter type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceAll] Build failed, passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ReduceAllBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ReduceAll] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + bool reduceToEnd{false}; + float coeff{0.0f}; + + void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(m_keepDims, m_mode, reduceToEnd, coeff); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ReduceAllBuilder, OH_NN_OPS_REDUCE_ALL); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/reduceall_builder.h b/frameworks/native/ops/reduceall_builder.h new file mode 100644 index 0000000..141d69b --- /dev/null +++ b/frameworks/native/ops/reduceall_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_REDUCEALL_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_REDUCEALL_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ReduceAllBuilder : public OpsBuilder { +public: + ReduceAllBuilder(); + ~ReduceAllBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + +private: + bool m_keepDims{false}; + mindspore::lite::ReduceMode m_mode{mindspore::lite::REDUCE_MODE_ALL}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_REDUCEALL_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/reducemean_builder.cpp b/frameworks/native/ops/reducemean_builder.cpp new file mode 100644 index 0000000..5f60e05 --- /dev/null +++ b/frameworks/native/ops/reducemean_builder.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
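
// The Reduce* builders (ReduceAll above, ReduceMean and ReduceProd below) share
// one structure: a scalar OH_NN_BOOL keep_dims parameter read by SetKeepDims(),
// and a call to MindIR_ReduceFusion_CreatePrimitive with reduceToEnd = false and
// coeff = 0.0f. They differ only in the parameter enum they accept
// (OH_NN_REDUCE_ALL_KEEP_DIMS, OH_NN_REDUCE_MEAN_KEEP_DIMS, ...) and in the
// ReduceMode stored in m_mode (REDUCE_MODE_ALL here, REDUCE_MODE_MEAN below).
// One asymmetry worth noting: ReduceAllBuilder::Build() does not call
// SetQuantType() on its outputs, while ReduceMeanBuilder and ReduceProdBuilder do.
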
+ */ + +#include "reducemean_builder.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "ReduceMean"; + +ReduceMeanBuilder::ReduceMeanBuilder() {} + +ReduceMeanBuilder:: ~ReduceMeanBuilder() {} + +OH_NN_ReturnCode ReduceMeanBuilder::SetKeepDims(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceMean] SetKeepDims failed, the keep_dims dimensions should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ReduceMean] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceMean] SetKeepDims failed, the keep_dims passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_keepDims = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceMeanBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ReduceMean] Build failed, the ReduceMean operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceMean] Build failed, passed invalid input or output index of ReduceMean operation index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (uint32_t i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_REDUCE_MEAN_KEEP_DIMS: + returnCode = SetKeepDims(tensor); + break; + default: + LOGE("[ReduceMean] Build failed, parameter type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceMean] Build failed, passed invalid param."); + return returnCode; + } + } + + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ReduceMeanBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ReduceMean] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + bool reduceToEnd{false}; + float coeff{0.0f}; + + void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(m_keepDims, m_mode, reduceToEnd, coeff); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ReduceMeanBuilder, OH_NN_OPS_REDUCE_MEAN); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/reducemean_builder.h b/frameworks/native/ops/reducemean_builder.h new file mode 100644 index 0000000..7ddc3b3 --- /dev/null +++ b/frameworks/native/ops/reducemean_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_REDUCEMEAN_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_REDUCEMEAN_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ReduceMeanBuilder : public OpsBuilder { +public: + ReduceMeanBuilder(); + ~ReduceMeanBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + +private: + bool m_keepDims{false}; + mindspore::lite::ReduceMode m_mode{mindspore::lite::REDUCE_MODE_MEAN}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_REDUCEMEAN_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/reduceprod_builder.cpp b/frameworks/native/ops/reduceprod_builder.cpp new file mode 100644 index 0000000..e22b2c5 --- /dev/null +++ b/frameworks/native/ops/reduceprod_builder.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "reduceprod_builder.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "ReduceProd"; + +ReduceProdBuilder::ReduceProdBuilder() {} + +ReduceProdBuilder:: ~ReduceProdBuilder() {} + +OH_NN_ReturnCode ReduceProdBuilder::SetKeepDims(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ReduceProd] SetKeepDims failed, the keep_dims dimensions should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ReduceProd] SetKeepDims failed, the keep_dims should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ReduceProd] SetKeepDims failed, the keep_dims passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_keepDims = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ReduceProdBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ReduceProd] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceProd] Build failed, passed invalid input or output index of ReduceProd operation index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (uint32_t i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_REDUCE_PROD_KEEP_DIMS: + returnCode = SetKeepDims(tensor); + break; + default: + LOGE("[ReduceProd] Build failed, parameter type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ReduceProd] Build failed, passed invalid param."); + return returnCode; + } + } + + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ReduceProdBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ReduceProd] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + bool reduceToEnd{false}; + float coeff{0.0f}; + + void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(m_keepDims, m_mode, reduceToEnd, coeff); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ReduceProdBuilder, OH_NN_OPS_REDUCE_PROD); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/reduceprod_builder.h b/frameworks/native/ops/reduceprod_builder.h new file mode 100644 index 0000000..b1314aa --- /dev/null +++ b/frameworks/native/ops/reduceprod_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_REDUCEPROD_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_REDUCEPROD_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ReduceProdBuilder : public OpsBuilder { +public: + ReduceProdBuilder(); + ~ReduceProdBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetKeepDims(std::shared_ptr tensor); + +private: + bool m_keepDims{false}; + mindspore::lite::ReduceMode m_mode{mindspore::lite::REDUCE_MODE_PROD}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_REDUCEPROD_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/relu6_builder.cpp b/frameworks/native/ops/relu6_builder.cpp new file mode 100644 index 0000000..eaa3474 --- /dev/null +++ b/frameworks/native/ops/relu6_builder.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "relu6_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Relu6"; + +Relu6Builder::Relu6Builder() {} + +Relu6Builder::~Relu6Builder() {} + +OH_NN_ReturnCode Relu6Builder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Relu6] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Relu6] Build failed, passed invalid input or output indices."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Relu6] Build failed, the Relu6 expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr Relu6Builder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Relu6] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + float alpha{0.0f}; + float minVal{0.0f}; + float maxVal{0.0f}; + bool approximate{false}; + mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_RELU6}; + + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, + minVal, maxVal, approximate); + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(Relu6Builder, OH_NN_OPS_RELU6); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/relu6_builder.h b/frameworks/native/ops/relu6_builder.h new file mode 100644 index 0000000..71f6196 --- /dev/null +++ b/frameworks/native/ops/relu6_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_RELU6_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_RELU6_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class Relu6Builder : public OpsBuilder { +public: + Relu6Builder(); + ~Relu6Builder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_RELU6_BUILDER_H diff --git a/frameworks/native/ops/relu_builder.cpp b/frameworks/native/ops/relu_builder.cpp new file mode 100644 index 0000000..6f397fe --- /dev/null +++ b/frameworks/native/ops/relu_builder.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "relu_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUMS = 1; +static const int OUTPUT_NUMS = 1; +static const std::string OP_NAME = "Relu"; + +ReluBuilder::ReluBuilder() {} + +ReluBuilder::~ReluBuilder() {} + +OH_NN_ReturnCode ReluBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Relu] Build failed, the Relu operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUMS, OUTPUT_NUMS); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Relu] Build failed, passed invalid input or output indices."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Relu] Build failed, the Relu expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ReluBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Relu] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + float alpha{0.0f}; + float minVal{0.0f}; + float maxVal{0.0f}; + bool approximate{false}; + mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_RELU}; + + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, + minVal, maxVal, approximate); + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ReluBuilder, OH_NN_OPS_RELU); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git 
a/frameworks/native/ops/relu_builder.h b/frameworks/native/ops/relu_builder.h new file mode 100644 index 0000000..9c8b9d7 --- /dev/null +++ b/frameworks/native/ops/relu_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_RELU_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_RELU_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ReluBuilder : public OpsBuilder { +public: + ReluBuilder(); + ~ReluBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_RELU_BUILDER_H diff --git a/frameworks/native/ops/reshape_builder.cpp b/frameworks/native/ops/reshape_builder.cpp new file mode 100644 index 0000000..e1469bc --- /dev/null +++ b/frameworks/native/ops/reshape_builder.cpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "reshape_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Reshape"; + +ReshapeBuilder::ReshapeBuilder() {} + +ReshapeBuilder::~ReshapeBuilder() {} + +OH_NN_ReturnCode ReshapeBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Reshape] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Reshape] Build failed, passed invalid input or output index of Reshape operation index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Reshape] Build failed, the Reshape expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ReshapeBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Reshape] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Reshape_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ReshapeBuilder, OH_NN_OPS_RESHAPE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/reshape_builder.h b/frameworks/native/ops/reshape_builder.h new file mode 100644 index 0000000..ef24166 --- /dev/null +++ b/frameworks/native/ops/reshape_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_RESHAPE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_RESHAPE_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ReshapeBuilder : public OpsBuilder { +public: + ReshapeBuilder(); + ~ReshapeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_RESHAPE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/resize_bilinear_builder.cpp b/frameworks/native/ops/resize_bilinear_builder.cpp new file mode 100644 index 0000000..5c868f2 --- /dev/null +++ b/frameworks/native/ops/resize_bilinear_builder.cpp @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "resize_bilinear_builder.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "ResizeBilinear"; + +ResizeBilinearBuilder::ResizeBilinearBuilder() {} + +ResizeBilinearBuilder::~ResizeBilinearBuilder() {} + +OH_NN_ReturnCode ResizeBilinearBuilder::SetNewHeight(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ResizeBilinear] SetNewHeight failed, the new_height dimensions should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[ResizeBilinear] SetNewHeight failed, the new_height should be type OH_NN_INT64"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ResizeBilinear] ResizeBilinear failed, the new_height passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_newHeight = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ResizeBilinearBuilder::SetNewWidth(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ResizeBilinear] SetNewWidth failed, the new_width dimensions should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[ResizeBilinear] SetNewWidth failed, the new_width should be type OH_NN_INT64"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ResizeBilinear] SetNewWidth failed, the new_width passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_newWidth = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ResizeBilinearBuilder::SetPreserveAspectRatio(std::shared_ptr 
tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ResizeBilinear] SetPreserveAspectRatio failed, the preserve_aspect_ratio dimensions should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[ResizeBilinear] SetPreserveAspectRatio failed, the preserve_aspect_ratio should be type OH_NN_BOOL"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ResizeBilinear] SetPreserveAspectRatio failed, the preserve_aspect_ratio passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_preserveAspectRatio = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ResizeBilinearBuilder::SetCoordinateTransformMode(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ResizeBilinear] SetCoordinateTransformMode failed," + "the coordinate_transform_mode dimensions should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[ResizeBilinear] SetCoordinateTransformMode failed," + "the coordinate_transform_mode should be type OH_NN_INT32"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ResizeBilinear] SetCoordinateTransformMode failed," + "the coordinate_transform_mode passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_coordinateTransformMode = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ResizeBilinearBuilder::SetExcludeOutside(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ResizeBilinear] SetExcludeOutside failed, the exclude_outside dimensions should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[ResizeBilinear] SetExcludeOutside failed, the exclude_outside should be type OH_NN_INT64"); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ResizeBilinear] SetExcludeOutside failed, the exclude_outside passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_excludeOutside = *(static_cast(buffer)); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ResizeBilinearBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ResizeBilinear] Build failed, the Resize operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ResizeBilinear] Build failed, passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (uint32_t i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + switch (tensor->GetType()) { + case OH_NN_RESIZE_BILINEAR_NEW_HEIGHT: + returnCode = SetNewHeight(tensor); + break; + case OH_NN_RESIZE_BILINEAR_NEW_WIDTH: + returnCode = SetNewWidth(tensor); + break; + case OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO: + returnCode = SetPreserveAspectRatio(tensor); + break; + case OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE: + returnCode = SetCoordinateTransformMode(tensor); + break; + case 
OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE: + returnCode = SetExcludeOutside(tensor); + break; + default: + LOGE("[ResizeBilinear] Build failed, parameter type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ResizeBilinear] Build failed, passed invalid param."); + return returnCode; + } + } + + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ResizeBilinearBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ResizeBilinear] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + float cubicCoeff{0.0f}; + float extrapolationValue{0.0f}; + mindspore::lite::NearestMode nearestMode{mindspore::lite::NEAREST_MODE_NORMAL}; + + void* primitive = mindspore::lite::MindIR_Resize_CreatePrimitive(m_method, m_newHeight, m_newWidth, + m_preserveAspectRatio, m_coordinateTransformMode, cubicCoeff, m_excludeOutside, + extrapolationValue, nearestMode); + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ResizeBilinearBuilder, OH_NN_OPS_RESIZE_BILINEAR); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/resize_bilinear_builder.h b/frameworks/native/ops/resize_bilinear_builder.h new file mode 100644 index 0000000..ce8eb56 --- /dev/null +++ b/frameworks/native/ops/resize_bilinear_builder.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_RESIZE_BILINEAR_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_RESIZE_BILINEAR_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ResizeBilinearBuilder : public OpsBuilder { +public: + ResizeBilinearBuilder(); + ~ResizeBilinearBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetNewHeight(std::shared_ptr tensor); + OH_NN_ReturnCode SetNewWidth(std::shared_ptr tensor); + OH_NN_ReturnCode SetPreserveAspectRatio(std::shared_ptr tensor); + OH_NN_ReturnCode SetCoordinateTransformMode(std::shared_ptr tensor); + OH_NN_ReturnCode SetExcludeOutside(std::shared_ptr tensor); + +private: + mindspore::lite::ResizeMethod m_method {mindspore::lite::RESIZE_METHOD_LINEAR}; + uint64_t m_newHeight{0}; + uint64_t m_newWidth{0}; + bool m_preserveAspectRatio{false}; + mindspore::lite::CoordinateTransformMode m_coordinateTransformMode { + mindspore::lite::COORDINATE_TRANSFORM_MODE_ASYMMETRIC}; + uint64_t m_excludeOutside{0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_RESIZE_BILINEAR_BUILDER_H diff --git a/frameworks/native/ops/rsqrt_builder.cpp b/frameworks/native/ops/rsqrt_builder.cpp new file mode 100644 index 0000000..b384005 --- /dev/null +++ b/frameworks/native/ops/rsqrt_builder.cpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "rsqrt_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Rsqrt"; + +RsqrtBuilder::RsqrtBuilder() {} + +RsqrtBuilder::~RsqrtBuilder() {} + +OH_NN_ReturnCode RsqrtBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Rsqrt] Build failed, operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Rsqrt] Build failed, passed invalid input or output index of Rsqrt operation index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[Rsqrt] Build failed, the Rsqrt expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr RsqrtBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Rsqrt] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Rsqrt_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(RsqrtBuilder, OH_NN_OPS_RSQRT); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/rsqrt_builder.h b/frameworks/native/ops/rsqrt_builder.h new file mode 100644 index 0000000..9d1f6c0 --- /dev/null +++ b/frameworks/native/ops/rsqrt_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_RSQRT_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_RSQRT_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class RsqrtBuilder : public OpsBuilder { +public: + RsqrtBuilder(); + ~RsqrtBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_RSQRT_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/scale_builder.cpp b/frameworks/native/ops/scale_builder.cpp new file mode 100644 index 0000000..defccf7 --- /dev/null +++ b/frameworks/native/ops/scale_builder.cpp @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "scale_builder.h" + +#include "frameworks/native/ops_registry.h" +#include "frameworks/native/validation.h" +#include "frameworks/native/transform.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 3; +static const int OUTPUT_NUM = 1; +static const int SCALE_LENGTH = 1; +static const std::string OP_NAME = "Scale"; + +ScaleBuilder::ScaleBuilder() {} + +ScaleBuilder::~ScaleBuilder() {} + +OH_NN_ReturnCode ScaleBuilder::SetAxis(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[ScaleBuilder] SetAxis failed, the axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ScaleBuilder] SetAxis failed, the axis dimensions should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ScaleBuilder] SetAxis failed, the axis passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + m_axis = static_cast(buffer); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ScaleBuilder::SetActivationType(std::shared_ptr tensor) +{ + tensor->IdentifyOpParameter(); + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[ScaleBuilder] SetActivationType failed, the activation should be type OH_NN_INT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != SCALE_LENGTH) { + LOGE("[ScaleBuilder] SetActivationType failed, the activation dimensions should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ScaleBuilder] SetActivationType failed, the activation passed buffer is empty."); + return OH_NN_INVALID_PARAMETER; + } + + const int8_t* fuseData = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast(*fuseData))) { + LOGE("[ScaleBuilder] SetActivationType failed, the activation input is 
invalid.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    auto fuseType = (OH_NN_FuseType)(*fuseData);
+    m_activationType = NNToMS::TransfromFusionType(fuseType);
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode ScaleBuilder::Build(const std::vector<uint32_t>& paramsIndex,
+                                     const std::vector<uint32_t>& inputsIndex,
+                                     const std::vector<uint32_t>& outputsIndex,
+                                     const std::vector<std::shared_ptr<NNTensor>>& allTensors)
+{
+    if (m_isBuild) {
+        LOGE("[ScaleBuilder] Build failed, the scale operation has been build, cannot build again.");
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
+
+    OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM);
+    if (returnCode != OH_NN_SUCCESS) {
+        LOGE("[ScaleBuilder] Build failed, passed invalid input or output index.");
+        return returnCode;
+    }
+
+    m_inputsIndex = inputsIndex;
+    m_outputsIndex = outputsIndex;
+
+    for (uint32_t i : paramsIndex) {
+        std::shared_ptr<NNTensor> tensor = allTensors[i];
+        switch (tensor->GetType()) {
+            case OH_NN_SCALE_AXIS:
+                returnCode = SetAxis(tensor);
+                break;
+            case OH_NN_SCALE_ACTIVATIONTYPE:
+                returnCode = SetActivationType(tensor);
+                break;
+            default:
+                LOGE("[ScaleBuilder] Build failed, parameter type is invalid. type=%d", tensor->GetType());
+                return OH_NN_INVALID_PARAMETER;
+        }
+
+        if (returnCode != OH_NN_SUCCESS) {
+            LOGE("[ScaleBuilder] Build failed, passed invalid param.");
+            return returnCode;
+        }
+    }
+
+    // The quantization type of the first output determines that of the operator.
+    SetQuantType(outputsIndex, allTensors);
+
+    m_isBuild = true;
+    m_name = OP_NAME;
+    return OH_NN_SUCCESS;
+}
+
+LiteGraphPrimitvePtr ScaleBuilder::GetPrimitive()
+{
+    if (!m_isBuild) {
+        LOGE("[ScaleBuilder] GetPrimitive failed, cannot get primitive before call build.");
+        return {nullptr, DestroyLiteGraphPrimitive};
+    }
+
+    void* primitive = mindspore::lite::MindIR_ScaleFusion_CreatePrimitive(*m_axis, m_activationType);
+    LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive);
+    return graphPrimitivePtr;
+}
+
+REGISTER_OPS(ScaleBuilder, OH_NN_OPS_SCALE);
+} // namespace Ops
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/frameworks/native/ops/scale_builder.h b/frameworks/native/ops/scale_builder.h
new file mode 100644
index 0000000..362f4ab
--- /dev/null
+++ b/frameworks/native/ops/scale_builder.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_SCALE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SCALE_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ScaleBuilder : public OpsBuilder { +public: + ScaleBuilder(); + ~ScaleBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + +private: + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + const uint64_t* m_axis{nullptr}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SCALE_BUILDER_H diff --git a/frameworks/native/ops/shape_builder.cpp b/frameworks/native/ops/shape_builder.cpp new file mode 100644 index 0000000..9164b0e --- /dev/null +++ b/frameworks/native/ops/shape_builder.cpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "shape_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Shape"; + +ShapeBuilder::ShapeBuilder() {} + +ShapeBuilder::~ShapeBuilder() {} + +OH_NN_ReturnCode ShapeBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ShapeBuilder] Build failed, the Shape operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ShapeBuilder] Build failed, passed invalid input or output index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[ShapeBuilder] Build failed, the Shape expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ShapeBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ShapeBuilder] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Shape_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(ShapeBuilder, OH_NN_OPS_SHAPE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/shape_builder.h b/frameworks/native/ops/shape_builder.h new file mode 100644 index 0000000..bd3d798 --- /dev/null +++ b/frameworks/native/ops/shape_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_SHAPE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SHAPE_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ShapeBuilder : public OpsBuilder { +public: + ShapeBuilder(); + ~ShapeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SHAPE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/sigmoid_builder.cpp b/frameworks/native/ops/sigmoid_builder.cpp new file mode 100644 index 0000000..ae7df18 --- /dev/null +++ b/frameworks/native/ops/sigmoid_builder.cpp @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "sigmoid_builder.h" + +#include "mindir.h" + +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Sigmoid"; + +SigmoidBuilder::SigmoidBuilder() {} + +SigmoidBuilder::~SigmoidBuilder() {} + +OH_NN_ReturnCode SigmoidBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[SigmoidBuilder] Build failed, the Sigmoid operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SigmoidBuilder] Build failed, passed invalid input or output index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGW("[SigmoidBuilder] Build failed, the Sigmoid expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determinies that of the operator. 
+ SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr SigmoidBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SigmoidBuilder] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + float alpha{0.0f}; + float minVal{0.0f}; + float maxVal{0.0f}; + bool approximate{false}; + mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_SIGMOID}; + + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, minVal, + maxVal, approximate); + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SigmoidBuilder, OH_NN_OPS_SIGMOID); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/sigmoid_builder.h b/frameworks/native/ops/sigmoid_builder.h new file mode 100644 index 0000000..63d9c8e --- /dev/null +++ b/frameworks/native/ops/sigmoid_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_SIGMOID_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SIGMOID_BUILDER_H + +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SigmoidBuilder : public OpsBuilder { +public: + SigmoidBuilder(); + ~SigmoidBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SIGMOID_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/slice_builder.cpp b/frameworks/native/ops/slice_builder.cpp new file mode 100644 index 0000000..6351cc8 --- /dev/null +++ b/frameworks/native/ops/slice_builder.cpp @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "slice_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 3; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Slice"; + +SliceBuilder::SliceBuilder() {} + +SliceBuilder::~SliceBuilder() {} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. + */ +OH_NN_ReturnCode SliceBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[SliceBuilder] Slice operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SliceBuilder] Passed invalid input or output index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGE("[SliceBuilder] slice expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr SliceBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SliceBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_SliceFusion_CreatePrimitive(m_axes); + if (primitive == nullptr) { + LOGE("[SliceBuilder] MindIR_SliceFusion_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SliceBuilder, OH_NN_OPS_SLICE); +} // namespace ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/slice_builder.h b/frameworks/native/ops/slice_builder.h new file mode 100644 index 0000000..3494d6e --- /dev/null +++ b/frameworks/native/ops/slice_builder.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef NEURAL_NETWORK_RUNTIME_SLICE_BUILDER_H
+#define NEURAL_NETWORK_RUNTIME_SLICE_BUILDER_H
+
+#include "frameworks/native/ops_builder.h"
+#include "frameworks/native/ops_registry.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace Ops {
+class SliceBuilder : public OpsBuilder {
+public:
+    SliceBuilder();
+    ~SliceBuilder() override;
+    OH_NN_ReturnCode Build(const std::vector<uint32_t>& paramsIndex,
+                           const std::vector<uint32_t>& inputsIndex,
+                           const std::vector<uint32_t>& outputsIndex,
+                           const std::vector<std::shared_ptr<NNTensor>>& allTensors) override;
+
+    LiteGraphPrimitvePtr GetPrimitive() override;
+
+private:
+    std::vector<int64_t> m_axes;
+};
+} // namespace Ops
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
+
+#endif // NEURAL_NETWORK_RUNTIME_SLICE_BUILDER_H
\ No newline at end of file
diff --git a/frameworks/native/ops/softmax_builder.cpp b/frameworks/native/ops/softmax_builder.cpp
new file mode 100644
index 0000000..e0835bf
--- /dev/null
+++ b/frameworks/native/ops/softmax_builder.cpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "softmax_builder.h"
+
+#include "mindir.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace Ops {
+static const int INPUT_NUM = 1;
+static const int OUTPUT_NUM = 1;
+static const std::string OP_NAME = "Softmax";
+
+SoftmaxBuilder::SoftmaxBuilder() {}
+
+SoftmaxBuilder::~SoftmaxBuilder() {}
+
+OH_NN_ReturnCode SoftmaxBuilder::SetAxis(std::shared_ptr<NNTensor> tensor)
+{
+    // Set Axis
+    if (tensor->GetDataType() != OH_NN_INT64) {
+        LOGE("[SoftmaxBuilder] The 2nd input axis should be type OH_NN_INT64.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (tensor->GetElementCount() != 1) {
+        LOGE("[SoftmaxBuilder] The 2nd input axis should be scalar.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    m_axis.clear();
+
+    void* buffer = tensor->GetBuffer();
+    if (buffer == nullptr) {
+        LOGE("[SoftmaxBuilder] Tensor buffer is nullptr.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+    m_axis.emplace_back(*(static_cast<int64_t*>(buffer)));
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode SoftmaxBuilder::Build(const std::vector<uint32_t>& paramsIndex,
+                                       const std::vector<uint32_t>& inputsIndex,
+                                       const std::vector<uint32_t>& outputsIndex,
+                                       const std::vector<std::shared_ptr<NNTensor>>& allTensors)
+{
+    if (m_isBuild) {
+        LOGE("[SoftmaxBuilder] Softmax operation has been build, cannot build again.");
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
+
+    OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM);
+    if (returnCode != OH_NN_SUCCESS) {
+        LOGE("[SoftmaxBuilder] Passed invalid input or output index.");
+        return returnCode;
+    }
+
+    m_inputsIndex = inputsIndex;
+    m_outputsIndex = outputsIndex;
+
+    for (int i : paramsIndex) {
+        std::shared_ptr<NNTensor> tensor = allTensors[i];
+        tensor->IdentifyOpParameter();
+        switch (tensor->GetType()) {
+            case OH_NN_SOFTMAX_AXIS:
+                returnCode = SetAxis(tensor);
+                break;
+            default:
+                LOGE("[SoftmaxBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SoftmaxBuilder] Passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphTensorPtr SoftmaxBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SoftmaxBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_Softmax_CreatePrimitive(m_axis); + if (primitive == nullptr) { + LOGE("[SoftmaxBuilder] Create primitive of Softmax failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphTensorPtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SoftmaxBuilder, OH_NN_OPS_SOFTMAX); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/softmax_builder.h b/frameworks/native/ops/softmax_builder.h new file mode 100644 index 0000000..4a449b2 --- /dev/null +++ b/frameworks/native/ops/softmax_builder.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_SOFTMAX_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SOFTMAX_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SoftmaxBuilder : public OpsBuilder { +public: + SoftmaxBuilder(); + ~SoftmaxBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphTensorPtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + +private: + std::vector m_axis; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SOFTMAX_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/space_to_batch_nd_builder.cpp b/frameworks/native/ops/space_to_batch_nd_builder.cpp new file mode 100644 index 0000000..094c168 --- /dev/null +++ b/frameworks/native/ops/space_to_batch_nd_builder.cpp @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "space_to_batch_nd_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "SpaceToBatchND"; +static const int PADDINGS_DATA_SIZE = 2; +static const int VECT_DATA_SIZE = 2; +static const int BLOCKSHAPE_RANK = 1; +static const int PADDINGS_RANK = 2; +static const int BLOCK_SIZE = 2; +static const int PADDINGS_SIZE = 4; + +SpaceToBatchNDBuilder::SpaceToBatchNDBuilder() {} + +SpaceToBatchNDBuilder::~SpaceToBatchNDBuilder() {} + +OH_NN_ReturnCode SpaceToBatchNDBuilder::SetBlockShape(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[SpaceToBatchNDBuilder] The 2nd input blockShape should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + auto blockshape_shape = tensor->GetDimensions(); + if (blockshape_shape.size() != BLOCKSHAPE_RANK) { + LOGE("[SpaceToBatchNDBuilder] Invalid rank of shape of 2nd input blockShape, should be 1 dimensions."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != BLOCK_SIZE) { + LOGE("[SpaceToBatchNDBuilder] The 2nd input blockShape size should be 2."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[SpaceToBatchNDBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + const int64_t* blockShapeData = reinterpret_cast(buffer); + const uint32_t elementSize = tensor->GetElementCount(); + for (uint32_t i = 0; i < elementSize; ++i) { + block_shape.push_back(blockShapeData[i]); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode SpaceToBatchNDBuilder::SetPaddings(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[SpaceToBatchNDBuilder] The 3rd input paddings should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + auto paddings_shape = tensor->GetDimensions(); + if (paddings_shape.size() != PADDINGS_RANK) { + LOGE("[SpaceToBatchNDBuilder] Invalid rank of shape of 3rd input paddings, should be 2 dimensions."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != PADDINGS_SIZE) { + LOGE("[SpaceToBatchNDBuilder] The 3rd input paddings size should be 4."); + return OH_NN_INVALID_PARAMETER; + } + + OH_NN_ReturnCode returnCode = SetPadData(tensor); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SpaceToBatchNDBuilder] SetPadData failed."); + return returnCode; + } + + return OH_NN_SUCCESS; +} +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. 
+ */ +OH_NN_ReturnCode SpaceToBatchNDBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[SpaceToBatchNDBuilder] SpaceToBatchND operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SpaceToBatchNDBuilder] Passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE: + returnCode = SetBlockShape(tensor); + break; + case OH_NN_SPACE_TO_BATCH_ND_PADDINGS: + returnCode = SetPaddings(tensor); + break; + default: + LOGE("[SpaceToBatchNDBuilder] Parameter Type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SpaceToBatchNDBuilder] Passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode SpaceToBatchNDBuilder::SetPadData(std::shared_ptr tensor) +{ + paddings.clear(); + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[SpaceToBatchNDBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + const int64_t* paddingsData = reinterpret_cast(buffer); + for (int i = 0; i < PADDINGS_DATA_SIZE; i++) { + std::vector vect_data; + vect_data.reserve(VECT_DATA_SIZE); + for (int i = 0; i < VECT_DATA_SIZE; ++i) { + vect_data.push_back(paddingsData[i]); + } + paddings.push_back(vect_data); + } + return OH_NN_SUCCESS; +} + +LiteGraphTensorPtr SpaceToBatchNDBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SpaceToBatchNDBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_SpaceToBatchND_CreatePrimitive(block_shape, paddings); + if (primitive == nullptr) { + LOGE("[SpaceToBatchNDBuilder] MindIR_SpaceToBatchND_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphTensorPtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SpaceToBatchNDBuilder, OH_NN_OPS_SPACE_TO_BATCH_ND); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/space_to_batch_nd_builder.h b/frameworks/native/ops/space_to_batch_nd_builder.h new file mode 100644 index 0000000..0944183 --- /dev/null +++ b/frameworks/native/ops/space_to_batch_nd_builder.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
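SetPadData above copies the 4-element int64 paddings buffer into a 2 x 2 matrix for MindIR_SpaceToBatchND_CreatePrimitive. Note that the outer and inner loops both use the variable name i, so the inner loop re-reads the first two buffer entries for every row; given that the tensor is validated to hold exactly four elements in a 2 x 2 layout, the intended mapping appears to be paddings[i][j] = data[i * 2 + j], as in this stand-alone sketch:

// Sketch only: unflattening a 4-element paddings buffer into the 2 x 2 matrix
// that the SpaceToBatchND primitive expects.
#include <cstdint>
#include <vector>

std::vector<std::vector<int64_t>> UnflattenPaddings(const int64_t* data)
{
    constexpr int rows = 2;  // PADDINGS_DATA_SIZE
    constexpr int cols = 2;  // VECT_DATA_SIZE
    std::vector<std::vector<int64_t>> paddings;
    paddings.reserve(rows);
    for (int i = 0; i < rows; ++i) {
        std::vector<int64_t> row;
        row.reserve(cols);
        for (int j = 0; j < cols; ++j) {
            row.push_back(data[i * cols + j]);  // consume all four elements exactly once
        }
        paddings.push_back(std::move(row));
    }
    return paddings;
}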
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_SPACETOBATCHND_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SPACETOBATCHND_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SpaceToBatchNDBuilder : public OpsBuilder { +public: + SpaceToBatchNDBuilder(); + ~SpaceToBatchNDBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphTensorPtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetPadData(std::shared_ptr tensor); + OH_NN_ReturnCode SetBlockShape(std::shared_ptr tensor); + OH_NN_ReturnCode SetPaddings(std::shared_ptr tensor); + +private: + std::vector> paddings {}; + std::vector block_shape {}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SPACETOBATCHND_BUILDER_H diff --git a/frameworks/native/ops/split_builder.cpp b/frameworks/native/ops/split_builder.cpp new file mode 100644 index 0000000..fbdd990 --- /dev/null +++ b/frameworks/native/ops/split_builder.cpp @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "split_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const std::string OP_NAME = "Split"; + +SplitBuilder::SplitBuilder() {} + +SplitBuilder::~SplitBuilder() {} + +OH_NN_ReturnCode SplitBuilder::SetInputAndOutput(const std::vector &inputsIndex, + const std::vector &outputsIndex, const std::vector> &allTensors) +{ + auto inputSize = inputsIndex.size(); + if (inputSize != INPUT_NUM) { + LOGE("[SplitBuilder] The number of inputsIndex should be %d, its number is %zu.", INPUT_NUM, inputSize); + return OH_NN_INVALID_PARAMETER; + } + + auto allTensorSize = allTensors.size(); + for (auto index : inputsIndex) { + if (index >= allTensorSize) { + LOGE("[SplitBuilder] InputsIndex of Split is out of range."); + return OH_NN_INVALID_PARAMETER; + } + } + + for (auto index : outputsIndex) { + if (index >= allTensorSize) { + LOGE("[SplitBuilder] OutputsIndex of Split is out of range."); + return OH_NN_INVALID_PARAMETER; + } + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determinies that of the operator. 
+ SetQuantType(outputsIndex, allTensors); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode SplitBuilder::SetAxis(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[SplitBuilder] The 4th input axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[SplitBuilder] The 4th input axis should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[SplitBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_axis = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode SplitBuilder::SetOutputNum(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[SplitBuilder] The 2nd input outputNum should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[SoftmaxBuilder] The 2nd input outputNum should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + m_output_num = *(static_cast(tensor->GetBuffer())); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode SplitBuilder::SetSizeSplits(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[SplitBuilder] The 3rd input sizeSplit should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + const int64_t *size_splits_data_ptr = reinterpret_cast(tensor->GetBuffer()); + for (uint32_t i = 0; i < tensor->GetElementCount(); i++) { + m_size_splits.push_back(*size_splits_data_ptr++); + } + + return OH_NN_SUCCESS; +} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. + */ +OH_NN_ReturnCode SplitBuilder::Build(const std::vector ¶msIndex, + const std::vector &inputsIndex, + const std::vector &outputsIndex, + const std::vector> &allTensors) +{ + if (m_isBuild) { + LOGE("[SplitBuilder] Split operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = SetInputAndOutput(inputsIndex, outputsIndex, allTensors); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SplitBuilder] Set index of inputs or outputs failed."); + return returnCode; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_SPLIT_AXIS: + returnCode = SetAxis(tensor); + break; + case OH_NN_SPLIT_OUTPUT_NUM: + returnCode = SetOutputNum(tensor); + break; + case OH_NN_SPLIT_SIZE_SPLITS: + returnCode = SetSizeSplits(tensor); + break; + default: + LOGE("[SplitBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SplitBuilder] Passed invalid param."); + return returnCode; + } + } + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphTensorPtr SplitBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SplitBuilder] Cannot get primitive before call build."); + return { nullptr, DestroyLiteGraphPrimitive }; + } + + auto primitive = mindspore::lite::MindIR_Split_CreatePrimitive(m_output_num, m_size_splits, m_axis); + if (primitive == nullptr) { + LOGE("[SplitBuilder] MindIR_Split_CreatePrimitive failed."); + return { nullptr, DestroyLiteGraphPrimitive }; + } + + LiteGraphTensorPtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SplitBuilder, OH_NN_OPS_SPLIT); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/split_builder.h b/frameworks/native/ops/split_builder.h new file mode 100644 index 0000000..2ab4cf6 --- /dev/null +++ b/frameworks/native/ops/split_builder.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_SPLIT_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SPLIT_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SplitBuilder : public OpsBuilder { +public: + SplitBuilder(); + ~SplitBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphTensorPtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetInputAndOutput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors); + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + OH_NN_ReturnCode SetOutputNum(std::shared_ptr tensor); + OH_NN_ReturnCode SetSizeSplits(std::shared_ptr tensor); + +private: + int64_t m_output_num {0}; + std::vector m_size_splits; + int64_t m_axis {0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SPLIT_BUILDER_H diff --git a/frameworks/native/ops/sqrt_builder.cpp b/frameworks/native/ops/sqrt_builder.cpp new file mode 100644 index 0000000..378902f --- /dev/null +++ b/frameworks/native/ops/sqrt_builder.cpp @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "sqrt_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Sqrt"; + +SqrtBuilder::SqrtBuilder() {} + +SqrtBuilder::~SqrtBuilder() {} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. + */ +OH_NN_ReturnCode SqrtBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[SqrtBuilder] Sqrt operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SqrtBuilder] Passed invalid input or output index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGE("[SqrtBuilder] sqrt expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphTensorPtr SqrtBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SqrtBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_Sqrt_CreatePrimitive(); + if (primitive == nullptr) { + LOGE("[SqrtBuilder] Create primitive of Sqrt failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphTensorPtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SqrtBuilder, OH_NN_OPS_SQRT); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/sqrt_builder.h b/frameworks/native/ops/sqrt_builder.h new file mode 100644 index 0000000..ad835b8 --- /dev/null +++ b/frameworks/native/ops/sqrt_builder.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
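SqrtBuilder takes no parameters, so Build only validates the input and output indices; the primitive itself denotes plain element-wise square root. For reference, a stand-alone statement of that behaviour (not part of the runtime):

// Sketch only: element-wise behaviour of the Sqrt primitive. Negative inputs
// yield NaN under IEEE-754, so callers typically feed non-negative data.
#include <cmath>

float SqrtOp(float x)
{
    return std::sqrt(x);
}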
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_SQRT_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SQRT_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SqrtBuilder : public OpsBuilder { +public: + SqrtBuilder(); + ~SqrtBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphTensorPtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SQRT_BUILDER_H diff --git a/frameworks/native/ops/squared_difference_builder.cpp b/frameworks/native/ops/squared_difference_builder.cpp new file mode 100644 index 0000000..331b43e --- /dev/null +++ b/frameworks/native/ops/squared_difference_builder.cpp @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "squared_difference_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "SquaredDifference"; + +SquaredDifferenceBuilder::SquaredDifferenceBuilder() {} + +SquaredDifferenceBuilder::~SquaredDifferenceBuilder() {} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. 
+ */ +OH_NN_ReturnCode SquaredDifferenceBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[SquaredDifferenceBuilder] SquaredDifference operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SquaredDifferenceBuilder] Passed invalid input or output index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGE("[SquaredDifferenceBuilder] squaredDifference expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphTensorPtr SquaredDifferenceBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SquaredDifferenceBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_SquaredDifference_CreatePrimitive(); + if (primitive == nullptr) { + LOGE("[SquaredDifferenceBuilder] MindIR_SquaredDifference_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphTensorPtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SquaredDifferenceBuilder, OH_NN_OPS_SQUARED_DIFFERENCE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/squared_difference_builder.h b/frameworks/native/ops/squared_difference_builder.h new file mode 100644 index 0000000..d51847c --- /dev/null +++ b/frameworks/native/ops/squared_difference_builder.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
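For reference, the element-wise semantics generally associated with SquaredDifference are out[i] = (a[i] - b[i])^2, subject to the usual broadcasting rules. A minimal stand-alone sketch on plain vectors (not part of the runtime):

// Sketch only: element-wise squared difference of two equally sized vectors.
#include <vector>

std::vector<float> SquaredDifference(const std::vector<float>& a, const std::vector<float>& b)
{
    std::vector<float> out(a.size());
    for (size_t i = 0; i < a.size() && i < b.size(); ++i) {
        const float d = a[i] - b[i];
        out[i] = d * d;
    }
    return out;
}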
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_SQUAREDDIFFERENCE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SQUAREDDIFFERENCE_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SquaredDifferenceBuilder : public OpsBuilder { +public: + SquaredDifferenceBuilder(); + ~SquaredDifferenceBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphTensorPtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SQUAREDDIFFERENCE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/squeeze_builder.cpp b/frameworks/native/ops/squeeze_builder.cpp new file mode 100644 index 0000000..c37da63 --- /dev/null +++ b/frameworks/native/ops/squeeze_builder.cpp @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "squeeze_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Squeeze"; + +SqueezeBuilder::SqueezeBuilder() {} + +SqueezeBuilder::~SqueezeBuilder() {} + +OH_NN_ReturnCode SqueezeBuilder::SetAxis(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[SqueezeBuilder] The 2nd input axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[SqueezeBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + int64_t *axis_data_ptr = static_cast(buffer); + const uint32_t elementSize = tensor->GetElementCount(); + for (uint32_t i = 0; i < elementSize; ++i) { + m_axis.push_back(*axis_data_ptr); + ++axis_data_ptr; + } + + return OH_NN_SUCCESS; +} + +/* * + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. 
+ */ +OH_NN_ReturnCode SqueezeBuilder::Build(const std::vector ¶msIndex, + const std::vector &inputsIndex, + const std::vector &outputsIndex, + const std::vector> &allTensors) +{ + if (m_isBuild) { + LOGE("[SqueezeBuilder] Squeeze operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SqueezeBuilder] Passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_SQUEEZE_AXIS: + returnCode = SetAxis(tensor); + break; + default: + LOGE("[SqueezeBuilder] Parameter Type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SqueezeBuilder] Passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphTensorPtr SqueezeBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SqueezeBuilder] Cannot get primitive before call build."); + return { nullptr, DestroyLiteGraphPrimitive }; + } + + auto primitive = mindspore::lite::MindIR_Squeeze_CreatePrimitive(m_axis); + if (primitive == nullptr) { + LOGE("[SqueezeBuilder] MindIR_Squeeze_CreatePrimitive failed."); + return { nullptr, DestroyLiteGraphPrimitive }; + } + + LiteGraphTensorPtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SqueezeBuilder, OH_NN_OPS_SQUEEZE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/squeeze_builder.h b/frameworks/native/ops/squeeze_builder.h new file mode 100644 index 0000000..989caa1 --- /dev/null +++ b/frameworks/native/ops/squeeze_builder.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
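The axis list collected by SqueezeBuilder::SetAxis names the dimensions to remove when their extent is 1; with an empty list, every size-1 dimension is removed. A stand-alone sketch of that shape transformation (illustration only, not the runtime's shape-inference code):

// Sketch only: shape produced by Squeeze for a given axis list.
#include <algorithm>
#include <cstdint>
#include <vector>

std::vector<int64_t> SqueezeShape(const std::vector<int64_t>& shape, const std::vector<int64_t>& axes)
{
    std::vector<int64_t> out;
    for (int64_t i = 0; i < static_cast<int64_t>(shape.size()); ++i) {
        const bool listed = axes.empty() || std::find(axes.begin(), axes.end(), i) != axes.end();
        if (listed && shape[i] == 1) {
            continue;  // drop this size-1 dimension
        }
        out.push_back(shape[i]);
    }
    return out;
}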
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_SQUEEZE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SQUEEZE_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SqueezeBuilder : public OpsBuilder { +public: + SqueezeBuilder(); + ~SqueezeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphTensorPtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + +private: + std::vector m_axis; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SQUEEZE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/stack_builder.cpp b/frameworks/native/ops/stack_builder.cpp new file mode 100644 index 0000000..efe7a5c --- /dev/null +++ b/frameworks/native/ops/stack_builder.cpp @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "stack_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_MIN_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Stack"; + +StackBuilder::StackBuilder() {} + +StackBuilder::~StackBuilder() {} + +OH_NN_ReturnCode StackBuilder::SetAxis(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[StackBuilder] The last input axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[StackBuilder] The last input axis should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[StackBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_axis = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. 
+ */ +OH_NN_ReturnCode StackBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[StackBuilder] Stack operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + if (inputsIndex.size() < INPUT_MIN_NUM) { + LOGE("[StackBuilder] The number of index of inputs don't larger than %d.", INPUT_MIN_NUM); + return OH_NN_INVALID_PARAMETER; + } + if (outputsIndex.size() != OUTPUT_NUM) { + LOGE("[StackBuilder] The number of index of outputs don't equal to %d.", OUTPUT_NUM); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + OH_NN_ReturnCode returnCode; + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_STACK_AXIS: + returnCode = SetAxis(tensor); + break; + default: + LOGE("[StackBuilder] Parameter Type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[StackBuilder] Passed invalid param."); + return returnCode; + } + } + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphTensorPtr StackBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[StackBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_Stack_CreatePrimitive(m_axis); + if (primitive == nullptr) { + LOGE("[StackBuilder] MindIR_Stack_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphTensorPtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(StackBuilder, OH_NN_OPS_STACK); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/stack_builder.h b/frameworks/native/ops/stack_builder.h new file mode 100644 index 0000000..1e80ecd --- /dev/null +++ b/frameworks/native/ops/stack_builder.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
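Stack joins its N equally shaped inputs along a new dimension of extent N inserted at the recorded axis, which is why the builder requires at least two inputs and a scalar axis. A stand-alone sketch of the resulting output shape (illustration only):

// Sketch only: output shape of Stack for N inputs of identical shape.
#include <cstdint>
#include <vector>

std::vector<int64_t> StackedShape(const std::vector<int64_t>& inputShape, size_t inputCount, size_t axis)
{
    std::vector<int64_t> out(inputShape);
    if (axis > out.size()) {
        axis = out.size();  // clamp for the sketch; the runtime would reject this instead
    }
    out.insert(out.begin() + axis, static_cast<int64_t>(inputCount));
    return out;
}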
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_STACK_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_STACK_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class StackBuilder : public OpsBuilder { +public: + StackBuilder(); + ~StackBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphTensorPtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + +private: + int64_t m_axis = {0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_STACK_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/strided_slice_builder.cpp b/frameworks/native/ops/strided_slice_builder.cpp new file mode 100644 index 0000000..4f25d85 --- /dev/null +++ b/frameworks/native/ops/strided_slice_builder.cpp @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "strided_slice_builder.h" + +#include "mindir.h" + +#include "interfaces/kits/c/neural_network_runtime_type.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 4; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "StridedSlice"; + +StridedSliceBuilder::StridedSliceBuilder() {} + +StridedSliceBuilder::~StridedSliceBuilder() {} + +OH_NN_ReturnCode StridedSliceBuilder::SetInputOutput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[StridedSliceBuilder] Passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode StridedSliceBuilder::SetBeginMask(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[StridedSliceBuilder] The 5th input beginMask should be type HNN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[StridedSliceBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_begin_mask = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode StridedSliceBuilder::SetEndMask(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[StridedSliceBuilder] The 6th input endMask should be type HNN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[StridedSliceBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_end_mask = *(static_cast(buffer)); + + return 
OH_NN_SUCCESS; +} + +OH_NN_ReturnCode StridedSliceBuilder::SetEllipsisMask(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[StridedSliceBuilder] The 7th input ellipsisMask should be type HNN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[StridedSliceBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_ellipsis_mask = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode StridedSliceBuilder::SetNewAxisMask(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[StridedSliceBuilder] The 8th input newAxisMask should be type HNN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[StridedSliceBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_new_axis_mask = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode StridedSliceBuilder::SetShrinkAxisMask(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[StridedSliceBuilder] The 9th input shrinkAxisMAsk should be type HNN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[StridedSliceBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_shrink_axis_mask = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. + */ +OH_NN_ReturnCode StridedSliceBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[StridedSliceBuilder] StridedSlice operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = SetInputOutput(inputsIndex, outputsIndex, allTensors); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[StridedSliceBuilder] Set index of inputs or outputs failed."); + return returnCode; + } + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_STRIDED_SLICE_BEGIN_MASK: + returnCode = SetBeginMask(tensor); + break; + case OH_NN_STRIDED_SLICE_END_MASK: + returnCode = SetEndMask(tensor); + break; + case OH_NN_STRIDED_SLICE_ELLIPSIS_MASK: + returnCode = SetEllipsisMask(tensor); + break; + case OH_NN_STRIDED_SLICE_NEW_AXIS_MASK: + returnCode = SetNewAxisMask(tensor); + break; + case OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK: + returnCode = SetShrinkAxisMask(tensor); + break; + default: + LOGE("[StridedSliceBuilder] Parameter Type is invalid. 
type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[StridedSliceBuilder] Passed invalid param."); + return returnCode; + } + } + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr StridedSliceBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[StridedSliceBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_StridedSlice_CreatePrimitive(m_begin_mask, m_end_mask, m_ellipsis_mask, + m_new_axis_mask, m_shrink_axis_mask); + if (primitive == nullptr) { + LOGE("[StridedSliceBuilder] MindIR_StridedSlice_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(StridedSliceBuilder, OH_NN_OPS_STRIDED_SLICE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/strided_slice_builder.h b/frameworks/native/ops/strided_slice_builder.h new file mode 100644 index 0000000..45637b0 --- /dev/null +++ b/frameworks/native/ops/strided_slice_builder.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_STRIDEDSLICE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_STRIDEDSLICE_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class StridedSliceBuilder : public OpsBuilder { +public: + StridedSliceBuilder(); + ~StridedSliceBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetInputOutput(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors); + OH_NN_ReturnCode SetBeginMask(std::shared_ptr tensor); + OH_NN_ReturnCode SetEndMask(std::shared_ptr tensor); + OH_NN_ReturnCode SetEllipsisMask(std::shared_ptr tensor); + OH_NN_ReturnCode SetNewAxisMask(std::shared_ptr tensor); + OH_NN_ReturnCode SetShrinkAxisMask(std::shared_ptr tensor); + +private: + int64_t m_begin_mask = {0}; + int64_t m_end_mask = {0}; + int64_t m_ellipsis_mask = {0}; + int64_t m_new_axis_mask = {0}; + int64_t m_shrink_axis_mask = {0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_STRIDEDSLICE_BUILDER_H diff --git a/frameworks/native/ops/sub_builder.cpp b/frameworks/native/ops/sub_builder.cpp new file mode 100644 index 0000000..6021c17 --- /dev/null +++ b/frameworks/native/ops/sub_builder.cpp @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "sub_builder.h" +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Sub"; + +SubBuilder::SubBuilder() {} + +SubBuilder::~SubBuilder() {} + +OH_NN_ReturnCode SubBuilder::SetActivationType(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[SubBuilder] The 3rd input activation should be type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[SubBuilder] The 3rd input activation should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[SubBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + int8_t* fuseData = static_cast(buffer); + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast(*fuseData))) { + LOGE("[SubBuilder] Fuse activation type is invalid"); + return OH_NN_INVALID_PARAMETER; + } + + auto fuseType = (OH_NN_FuseType)(*fuseData); + m_activationType = NNToMS::TransfromFusionType(fuseType); + return OH_NN_SUCCESS; +} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. + */ +OH_NN_ReturnCode SubBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[SubBuilder] Sub operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SubBuilder] Passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_SUB_ACTIVATIONTYPE: + returnCode = SetActivationType(tensor); + break; + default: + LOGE("[SubBuilder] Parameter Type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[SubBuilder] Passed invalid param."); + return returnCode; + } + } + + // The quantization type of the first output determinies that of the operator. 
+ SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = "Sub"; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr SubBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[SubBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_SubFusion_CreatePrimitive(m_activationType); + if (primitive == nullptr) { + LOGE("[SubBuilder] MindIR_SubFusion_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(SubBuilder, OH_NN_OPS_SUB); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/sub_builder.h b/frameworks/native/ops/sub_builder.h new file mode 100644 index 0000000..19dc8d2 --- /dev/null +++ b/frameworks/native/ops/sub_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_SUB_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SUB_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SubBuilder : public OpsBuilder { +public: + SubBuilder(); + ~SubBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetActivationType(std::shared_ptr tensor); + +private: + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SUB_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/tanh_builder.cpp b/frameworks/native/ops/tanh_builder.cpp new file mode 100644 index 0000000..c275f00 --- /dev/null +++ b/frameworks/native/ops/tanh_builder.cpp @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
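SubBuilder only records the fused activation type; applying it is left to the backend that executes the resulting SubFusion primitive. A stand-alone sketch of what a fused RELU on Sub means at execution time (illustration only, not the runtime's kernel):

// Sketch only: arithmetic result of Sub followed by an optional fused RELU.
#include <algorithm>

float FusedSub(float a, float b, bool fuseRelu)
{
    const float diff = a - b;
    return fuseRelu ? std::max(diff, 0.0f) : diff;
}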
+ */ + +#include "tanh_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Tanh"; + +TanhBuilder::TanhBuilder() {} + +TanhBuilder::~TanhBuilder() {} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. + */ +OH_NN_ReturnCode TanhBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[TanhBuilder] Tanh operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[TanhBuilder] Passed invalid input or output index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGE("[TanhBuilder] TanhBuilder expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + // The quantization type of the first output determinies that of the operator. + SetQuantType(outputsIndex, allTensors); + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr TanhBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[TanhBuilder] Cannot get primitive before call build."); + return { nullptr, DestroyLiteGraphPrimitive }; + } + + float alpha {0.0f}; + float minVal {0.0f}; + float maxVal {0.0f}; + bool approximate {false}; + auto primitive = + mindspore::lite::MindIR_Activation_CreatePrimitive(m_activationType, alpha, minVal, maxVal, approximate); + if (primitive == nullptr) { + LOGE("[TanhBuilder] Create primitive of Tanh failed."); + return { nullptr, DestroyLiteGraphPrimitive }; + } + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(TanhBuilder, OH_NN_OPS_TANH); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/tanh_builder.h b/frameworks/native/ops/tanh_builder.h new file mode 100644 index 0000000..32fd2a7 --- /dev/null +++ b/frameworks/native/ops/tanh_builder.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_TANH_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_TANH_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class TanhBuilder : public OpsBuilder { +public: + TanhBuilder(); + ~TanhBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + mindspore::lite::ActivationType m_activationType{mindspore::lite::ACTIVATION_TYPE_TANH}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_TANH_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/tile_builder.cpp b/frameworks/native/ops/tile_builder.cpp new file mode 100644 index 0000000..8032acc --- /dev/null +++ b/frameworks/native/ops/tile_builder.cpp @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "tile_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Tile"; + +TileBuilder::TileBuilder() {} + +TileBuilder::~TileBuilder() {} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. 
+ */
+OH_NN_ReturnCode TileBuilder::Build(const std::vector<uint32_t>& paramsIndex,
+                                    const std::vector<uint32_t>& inputsIndex,
+                                    const std::vector<uint32_t>& outputsIndex,
+                                    const std::vector<std::shared_ptr<NNTensor>>& allTensors)
+{
+    if (m_isBuild) {
+        LOGE("[TileBuilder] Tile operation has been build, cannot build again.");
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
+
+    OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM);
+    if (returnCode != OH_NN_SUCCESS) {
+        LOGE("[TileBuilder] Passed invalid input or output index.");
+        return returnCode;
+    }
+
+    if (!paramsIndex.empty()) {
+        LOGE("[TileBuilder] TileBuilder expects no parameters, but receive %zu", paramsIndex.size());
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    m_inputsIndex = inputsIndex;
+    m_outputsIndex = outputsIndex;
+
+    m_isBuild = true;
+    m_name = OP_NAME;
+    return OH_NN_SUCCESS;
+}
+
+LiteGraphPrimitvePtr TileBuilder::GetPrimitive()
+{
+    if (!m_isBuild) {
+        LOGE("[TileBuilder] Cannot get primitive before call build.");
+        return {nullptr, DestroyLiteGraphPrimitive};
+    }
+
+    auto primitive = mindspore::lite::MindIR_TileFusion_CreatePrimitive(m_dims);
+    if (primitive == nullptr) {
+        LOGE("[TileBuilder] MindIR_TileFusion_CreatePrimitive failed.");
+        return {nullptr, DestroyLiteGraphPrimitive};
+    }
+
+    LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive);
+    return graphPrimitivePtr;
+}
+
+REGISTER_OPS(TileBuilder, OH_NN_OPS_TILE);
+} // namespace Ops
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/frameworks/native/ops/tile_builder.h b/frameworks/native/ops/tile_builder.h
new file mode 100644
index 0000000..fc93212
--- /dev/null
+++ b/frameworks/native/ops/tile_builder.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NEURAL_NETWORK_RUNTIME_TILE_BUILDER_H
+#define NEURAL_NETWORK_RUNTIME_TILE_BUILDER_H
+
+#include "frameworks/native/ops_builder.h"
+#include "frameworks/native/ops_registry.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace Ops {
+class TileBuilder : public OpsBuilder {
+public:
+    TileBuilder();
+    ~TileBuilder() override;
+    OH_NN_ReturnCode Build(const std::vector<uint32_t>& paramsIndex,
+                           const std::vector<uint32_t>& inputsIndex,
+                           const std::vector<uint32_t>& outputsIndex,
+                           const std::vector<std::shared_ptr<NNTensor>>& allTensors) override;
+
+    LiteGraphPrimitvePtr GetPrimitive() override;
+
+private:
+    std::vector<int64_t> m_dims {0};
+};
+} // namespace Ops
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
+
+#endif // NEURAL_NETWORK_RUNTIME_TILE_BUILDER_H
diff --git a/frameworks/native/ops/top_k_builder.cpp b/frameworks/native/ops/top_k_builder.cpp
new file mode 100644
index 0000000..673d6f0
--- /dev/null
+++ b/frameworks/native/ops/top_k_builder.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "top_k_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const std::string OP_NAME = "TopK"; +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 2; + +TopKBuilder::TopKBuilder() {} + +TopKBuilder::~TopKBuilder() {} + +OH_NN_ReturnCode TopKBuilder::SetSorted(std::shared_ptr tensor) +{ + if (tensor->GetDataType() != OH_NN_BOOL) { + LOGE("[TopK] The sorted should be type OH_NN_BOOL."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[TopK] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_sorted = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +/** + * Build method. + * 1.build primitive of ops. + * 2.build inputIndex of ops. + * 3.build outputIndex of ops. + */ +OH_NN_ReturnCode TopKBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[TopK] Build operation has been completed, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[TopK] Passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_TOP_K_SORTED: + returnCode = SetSorted(tensor); + break; + default: + LOGE("[TopK] Parameter Type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[TopK] Passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr TopKBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[TopK] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + int64_t axis = 0; + auto primitive = mindspore::lite::MindIR_TopKFusion_CreatePrimitive(m_sorted, axis); + if (primitive == nullptr) { + LOGE("[TopK] MindIR_TopKFusion_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(TopKBuilder, OH_NN_OPS_TOP_K); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/top_k_builder.h b/frameworks/native/ops/top_k_builder.h new file mode 100644 index 0000000..dfd4a6a --- /dev/null +++ b/frameworks/native/ops/top_k_builder.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
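Editor's note: TopKBuilder above feeds a single sorted attribute into MindIR_TopKFusion_CreatePrimitive and declares two outputs, the top values and their indices. For reference, this is the semantics the operator implements, shown here as a stand-alone 1-D helper rather than the kernel the runtime actually dispatches to:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

// Return the k largest values and their indices; if sorted is true the results
// are ordered from largest to smallest, matching the usual TopK contract.
void TopK(const std::vector<float>& input, size_t k, bool sorted,
          std::vector<float>& values, std::vector<int32_t>& indices)
{
    std::vector<int32_t> order(input.size());
    std::iota(order.begin(), order.end(), 0);

    auto greater = [&input](int32_t lhs, int32_t rhs) { return input[lhs] > input[rhs]; };
    if (sorted) {
        std::partial_sort(order.begin(), order.begin() + k, order.end(), greater);
    } else {
        std::nth_element(order.begin(), order.begin() + k - 1, order.end(), greater);
    }

    values.clear();
    indices.clear();
    for (size_t i = 0; i < k; ++i) {
        indices.push_back(order[i]);
        values.push_back(input[order[i]]);
    }
}

int main()
{
    std::vector<float> input {0.1f, 3.2f, 1.5f, 2.7f};
    std::vector<float> values;
    std::vector<int32_t> indices;
    TopK(input, 2, true, values, indices);
    std::cout << values[0] << " " << values[1] << std::endl;   // prints: 3.2 2.7
    return 0;
}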
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_TOPK_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_TOPK_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class TopKBuilder : public OpsBuilder { +public: + TopKBuilder(); + ~TopKBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetSorted(std::shared_ptr tensor); + +private: + bool m_sorted; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_TOPK_BUILDER_H diff --git a/frameworks/native/ops/transpose_builder.cpp b/frameworks/native/ops/transpose_builder.cpp new file mode 100644 index 0000000..a89b282 --- /dev/null +++ b/frameworks/native/ops/transpose_builder.cpp @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "transpose_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Transpose"; + +TransposeBuilder::TransposeBuilder() {} + +TransposeBuilder::~TransposeBuilder() {} + +/** + * Build method. + * 1.set attr of ops. + * 2.set inputIndex of ops. + * 3.set outputIndex of ops. 
+ */ +OH_NN_ReturnCode TransposeBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[TransposeBuilder] Transpose operation has been build, cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[TransposeBuilder] Passed invalid input or output index."); + return returnCode; + } + + if (!paramsIndex.empty()) { + LOGE("[TransposeBuilder] TransposeBuilder expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr TransposeBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[TransposeBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_Transpose_CreatePrimitive(); + if (primitive == nullptr) { + LOGE("[TransposeBuilder] MindIR_Transpose_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(TransposeBuilder, OH_NN_OPS_TRANSPOSE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/transpose_builder.h b/frameworks/native/ops/transpose_builder.h new file mode 100644 index 0000000..998d580 --- /dev/null +++ b/frameworks/native/ops/transpose_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_TRANSPOSE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_TRANSPOSE_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class TransposeBuilder : public OpsBuilder { +public: + TransposeBuilder(); + ~TransposeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_TRANSPOSE_BUILDER_H diff --git a/frameworks/native/ops/unsqueeze_builder.cpp b/frameworks/native/ops/unsqueeze_builder.cpp new file mode 100644 index 0000000..62b6d86 --- /dev/null +++ b/frameworks/native/ops/unsqueeze_builder.cpp @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
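Editor's note: TransposeBuilder above receives the permutation as its second input tensor, which is why Build() rejects any paramsIndex. The operator's effect on a shape is a plain index permutation; an illustrative helper, not runtime code:

#include <cstdint>
#include <iostream>
#include <vector>

// Apply a Transpose permutation to a shape: output dim i takes its size from
// input dim perm[i]. For NHWC -> NCHW the permutation is {0, 3, 1, 2}.
std::vector<int64_t> PermuteShape(const std::vector<int64_t>& shape, const std::vector<int64_t>& perm)
{
    std::vector<int64_t> permuted;
    permuted.reserve(perm.size());
    for (int64_t axis : perm) {
        permuted.push_back(shape[axis]);
    }
    return permuted;
}

int main()
{
    std::vector<int64_t> nhwc {1, 224, 224, 3};
    std::vector<int64_t> nchw = PermuteShape(nhwc, {0, 3, 1, 2});
    std::cout << nchw[0] << " " << nchw[1] << " " << nchw[2] << " " << nchw[3] << std::endl;   // 1 3 224 224
    return 0;
}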
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "unsqueeze_builder.h" + +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Unsqueeze"; + +UnsqueezeBuilder::UnsqueezeBuilder() {} + +UnsqueezeBuilder::~UnsqueezeBuilder() {} + +OH_NN_ReturnCode UnsqueezeBuilder::SetAxis(std::shared_ptr tensor) +{ + // Set Axis + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[UnsqueezeBuilder] The 2nd input axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[UnsqueezeBuilder] The 2nd input axis should be scaler."); + return OH_NN_INVALID_PARAMETER; + } + + m_axis.clear(); + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[UnsqueezeBuilder] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_axis.emplace_back(*(static_cast(buffer))); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode UnsqueezeBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[UnsqueezeBuilder] Unsqueeze build operation has been build, cannot build again"); + return OH_NN_OPERATION_FORBIDDEN; + } + OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (returnCode != OH_NN_SUCCESS) { + LOGE("[UnsqueezeBuilder] Passed invalid input or output index."); + return returnCode; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_UNSQUEEZE_AXIS: + returnCode = SetAxis(tensor); + break; + default: + LOGE("[UnsqueezeBuilder] Parameter Type is invalid. type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[UnsqueezeBuilder] Passed invalid param."); + return returnCode; + } + } + // The quantization type of the first output determinies that of the operator. 
+ SetQuantType(outputsIndex, allTensors); + + m_isBuild = true; + m_name = OP_NAME; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr UnsqueezeBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[UnsqueezeBuilder] Cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + auto primitive = mindspore::lite::MindIR_Unsqueeze_CreatePrimitive(m_axis); + if (primitive == nullptr) { + LOGE("[UnsqueezeBuilder] MindIR_Unsqueeze_CreatePrimitive failed."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive); + return graphPrimitivePtr; +} + +REGISTER_OPS(UnsqueezeBuilder, OH_NN_OPS_UNSQUEEZE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/unsqueeze_builder.h b/frameworks/native/ops/unsqueeze_builder.h new file mode 100644 index 0000000..6633957 --- /dev/null +++ b/frameworks/native/ops/unsqueeze_builder.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_UNSQUEEZE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_UNSQUEEZE_BUILDER_H + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class UnsqueezeBuilder : public OpsBuilder { +public: + UnsqueezeBuilder(); + ~UnsqueezeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + +private: + std::vector m_axis; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_UNSQUEEZE_BUILDER_H diff --git a/frameworks/native/ops_builder.cpp b/frameworks/native/ops_builder.cpp new file mode 100644 index 0000000..d815fc9 --- /dev/null +++ b/frameworks/native/ops_builder.cpp @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
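Editor's note: UnsqueezeBuilder above reads one int64 axis and passes it to MindIR_Unsqueeze_CreatePrimitive. On the shape side the operator simply inserts a dimension of size 1 at that axis; an illustrative helper, not runtime code:

#include <cstdint>
#include <iostream>
#include <vector>

// Insert a dimension of size 1 at the given axis, e.g. {3, 4} with axis 0 -> {1, 3, 4}.
std::vector<int64_t> Unsqueeze(const std::vector<int64_t>& shape, int64_t axis)
{
    std::vector<int64_t> output = shape;
    output.insert(output.begin() + axis, 1);
    return output;
}

int main()
{
    std::vector<int64_t> output = Unsqueeze({3, 4}, 0);
    for (int64_t dim : output) {
        std::cout << dim << " ";   // prints: 1 3 4
    }
    std::cout << std::endl;
    return 0;
}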
+ */ + +#include "ops_builder.h" +#include "mindir.h" +#include "mindir_types.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +void DestroyLiteGraphPrimitive(void* primitive) +{ + mindspore::lite::MindIR_Primitive_Destroy(&primitive); +} + +void OpsBuilder::GetInputIndex(std::vector& inputsIndex, + const std::unordered_map& modelIDToGraphID) const +{ + for (auto index : m_inputsIndex) { + // index has been prevented from taking value out of modelIDToGraphID, no need to check. + inputsIndex.emplace_back(modelIDToGraphID.at(index)); + } +} + +void OpsBuilder::GetOutputIndex(std::vector& outputsIndex, + const std::unordered_map& modelIDToGraphID) const +{ + for (auto index : m_outputsIndex) { + // index has been prevented from taking value out of modelIDToGraphID, no need to check. + outputsIndex.emplace_back(modelIDToGraphID.at(index)); + } +} + +std::string OpsBuilder::GetName() const +{ + return m_name; +} + +OpsQuantType OpsBuilder::GetQuantType() const +{ + return m_quantType; +} + +OH_NN_ReturnCode OpsBuilder::CheckIOIndex(const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors, + const size_t inputNum, + const size_t outputNum) const +{ + size_t inputsIndexSize = inputsIndex.size(); + size_t outputIndexSize = outputsIndex.size(); + if (inputsIndexSize != inputNum) { + LOGE("The number of index of inputs is %zu don't equal to %zu.", inputsIndexSize, inputNum); + return OH_NN_INVALID_PARAMETER; + } + if (outputIndexSize != outputNum) { + LOGE("The number of index of outputs is %zu don't equal to %zu.", outputIndexSize, outputNum); + return OH_NN_INVALID_PARAMETER; + } + + for (auto index : inputsIndex) { + if (index >= allTensors.size()) { + LOGE("The index of inputs is out of range."); + return OH_NN_INVALID_PARAMETER; + } + } + + for (auto index : outputsIndex) { + if (index >= allTensors.size()) { + LOGE("The index of outputs is out of range."); + return OH_NN_INVALID_PARAMETER; + } + } + + return OH_NN_SUCCESS; +} + +void OpsBuilder::SetQuantType(const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (allTensors[outputsIndex.front()]->IsQuantTensor()) { + m_quantType = OpsQuantType::QUANT_ALL; + } +} +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops_builder.h b/frameworks/native/ops_builder.h new file mode 100644 index 0000000..ca2f70c --- /dev/null +++ b/frameworks/native/ops_builder.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
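Editor's note: ops_builder.cpp above does two recurring jobs for every operator: CheckIOIndex() verifies that the input and output counts match and that every index stays inside allTensors, and GetInputIndex()/GetOutputIndex() translate model tensor IDs into lite-graph IDs through modelIDToGraphID. A self-contained sketch of that remapping with plain standard containers and illustrative names:

#include <cstdint>
#include <iostream>
#include <unordered_map>
#include <vector>

// Map model tensor indices to graph tensor indices, refusing unknown indices.
bool RemapIndices(const std::vector<uint32_t>& modelIndices,
                  const std::unordered_map<uint32_t, uint32_t>& modelIDToGraphID,
                  std::vector<uint32_t>& graphIndices)
{
    graphIndices.clear();
    for (uint32_t index : modelIndices) {
        auto it = modelIDToGraphID.find(index);
        if (it == modelIDToGraphID.end()) {
            return false;              // would be a CheckIOIndex-style failure upstream
        }
        graphIndices.push_back(it->second);
    }
    return true;
}

int main()
{
    std::unordered_map<uint32_t, uint32_t> modelIDToGraphID {{4, 0}, {7, 1}, {9, 2}};
    std::vector<uint32_t> graphIndices;
    if (RemapIndices({7, 9}, modelIDToGraphID, graphIndices)) {
        std::cout << graphIndices[0] << " " << graphIndices[1] << std::endl;   // prints: 1 2
    }
    return 0;
}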
+ */
+
+#ifndef NEURAL_NETWORK_RUNTIME_OPS_BUILDER_H
+#define NEURAL_NETWORK_RUNTIME_OPS_BUILDER_H
+
+#include <memory>
+#include <vector>
+
+#include "nn_tensor.h"
+#include "common/log.h"
+#include "interfaces/kits/c/neural_network_runtime.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace Ops {
+using LiteGraphPrimitvePtr = std::unique_ptr<void, void (*)(void*)>;
+void DestroyLiteGraphPrimitive(void* primitive);
+
+// QuantType Enum
+enum class OpsQuantType: int {
+    QUANT_NONE = 0,
+    QUANT_ALL = 1
+};
+
+class OpsBuilder {
+public:
+    OpsBuilder() = default;
+    virtual ~OpsBuilder() = default;
+
+    // Other operation builders inherit from OpsBuilder, delete these special construction and assignment functions.
+    OpsBuilder(const OpsBuilder& opsBuilder) = delete;
+    OpsBuilder& operator=(const OpsBuilder& opsBuilder) = delete;
+    OpsBuilder(OpsBuilder&& opsBuilder) = delete;
+    OpsBuilder& operator=(OpsBuilder&& opsBuilder) = delete;
+
+    virtual OH_NN_ReturnCode Build(const std::vector<uint32_t>& paramsIndex,
+                                   const std::vector<uint32_t>& inputsIndex,
+                                   const std::vector<uint32_t>& outputsIndex,
+                                   const std::vector<std::shared_ptr<NNTensor>>& allTensors) = 0;
+    virtual LiteGraphPrimitvePtr GetPrimitive() = 0;
+
+    virtual void GetInputIndex(std::vector<uint32_t>& inputsIndex,
+                               const std::unordered_map<uint32_t, uint32_t>& modelIDToGraphID) const;
+    virtual void GetOutputIndex(std::vector<uint32_t>& outputsIndex,
+                                const std::unordered_map<uint32_t, uint32_t>& modelIDToGraphID) const;
+    virtual std::string GetName() const;
+    virtual OpsQuantType GetQuantType() const;
+
+protected:
+    OH_NN_ReturnCode CheckIOIndex(const std::vector<uint32_t>& inputsIndex,
+                                  const std::vector<uint32_t>& outputsIndex,
+                                  const std::vector<std::shared_ptr<NNTensor>>& allTensors,
+                                  const size_t inputNum,
+                                  const size_t outputNum) const;
+    void SetQuantType(const std::vector<uint32_t>& outputsIndex,
+                      const std::vector<std::shared_ptr<NNTensor>>& allTensors);
+
+protected:
+    std::string m_name;
+    std::vector<uint32_t> m_inputsIndex;
+    std::vector<uint32_t> m_outputsIndex;
+    OpsQuantType m_quantType {OpsQuantType::QUANT_NONE};
+    bool m_isBuild {false};
+};
+} // namespace Ops
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
+#endif // NEURAL_NETWORK_RUNTIME_OPS_BUILDER_H
\ No newline at end of file
diff --git a/frameworks/native/ops_registry.cpp b/frameworks/native/ops_registry.cpp
new file mode 100644
index 0000000..c71eb35
--- /dev/null
+++ b/frameworks/native/ops_registry.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ops_registry.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace Ops {
+OpsRegistry::Registrar::Registrar(OH_NN_OperationType opsType, std::function<std::unique_ptr<OpsBuilder>()> createFunc)
+{
+    OpsRegistry& registry = OpsRegistry::GetSingleton();
+    if (registry.m_opsRegedit.find(opsType) != registry.m_opsRegedit.end()) {
+        LOGW("Operation has been registered, cannot register twice. Operation type: %d", opsType);
+    } else {
+        registry.m_opsRegedit[opsType] = createFunc;
+    }
+}
+
+OpsRegistry& OpsRegistry::GetSingleton()
+{
+    static OpsRegistry opsRegistry;
+    return opsRegistry;
+}
+
+std::unique_ptr<OpsBuilder> OpsRegistry::GetOpsBuilder(OH_NN_OperationType type) const
+{
+    if (m_opsRegedit.find(type) != m_opsRegedit.end()) {
+        return m_opsRegedit.at(type)();
+    }
+    return nullptr;
+}
+} // namespace Ops
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/frameworks/native/ops_registry.h b/frameworks/native/ops_registry.h
new file mode 100644
index 0000000..29171cb
--- /dev/null
+++ b/frameworks/native/ops_registry.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HETERNEURAL_NETWORK_OPS_REGISTRY_H
+#define HETERNEURAL_NETWORK_OPS_REGISTRY_H
+
+#include <functional>
+#include <memory>
+#include <unordered_map>
+
+#include "ops_builder.h"
+#include "interfaces/kits/c/neural_network_runtime.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace Ops {
+class OpsRegistry {
+public:
+    struct Registrar {
+        Registrar() = delete;
+        Registrar(OH_NN_OperationType opsType, std::function<std::unique_ptr<OpsBuilder>()> createFunc);
+    };
+
+public:
+    static OpsRegistry& GetSingleton();
+    std::unique_ptr<OpsBuilder> GetOpsBuilder(OH_NN_OperationType type) const;
+
+private:
+    OpsRegistry() {};
+    OpsRegistry(const OpsRegistry&) = delete;
+    OpsRegistry& operator=(const OpsRegistry&) = delete;
+
+private:
+    std::unordered_map<OH_NN_OperationType, std::function<std::unique_ptr<OpsBuilder>()>> m_opsRegedit;
+};
+
+#define CREATE_FUNC(T) ([]()->std::unique_ptr<T> {return std::make_unique<T>();})
+#define REGISTER_OPS(T, opsType) static OpsRegistry::Registrar g_##T(opsType, CREATE_FUNC(T))
+} // namespace Ops
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
+#endif // HETERNEURAL_NETWORK_OPS_REGISTRY_H
\ No newline at end of file
diff --git a/frameworks/native/transform.cpp b/frameworks/native/transform.cpp
new file mode 100644
index 0000000..ea0d339
--- /dev/null
+++ b/frameworks/native/transform.cpp
@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
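Editor's note: OpsRegistry above relies on the static-registrar idiom: REGISTER_OPS expands to a file-scope Registrar whose constructor runs during static initialization and stores a factory lambda in the singleton's map, so builders can later be created from an OH_NN_OperationType. A minimal self-contained version of the same mechanism, with generic names instead of the runtime's:

#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

struct Builder {
    virtual ~Builder() = default;
    virtual std::string Name() const = 0;
};

class Registry {
public:
    static Registry& Instance()
    {
        static Registry instance;
        return instance;
    }

    // File-scope Registrar objects call this before main(), like REGISTER_OPS does.
    struct Registrar {
        Registrar(int type, std::function<std::unique_ptr<Builder>()> create)
        {
            Instance().m_factories[type] = std::move(create);
        }
    };

    std::unique_ptr<Builder> Create(int type) const
    {
        auto it = m_factories.find(type);
        return (it != m_factories.end()) ? it->second() : nullptr;
    }

private:
    std::unordered_map<int, std::function<std::unique_ptr<Builder>()>> m_factories;
};

struct AddBuilder : Builder {
    std::string Name() const override { return "Add"; }
};

// Equivalent of REGISTER_OPS(AddBuilder, ...) with 1 standing in for the operation enum.
static Registry::Registrar g_addRegistrar(1, [] { return std::make_unique<AddBuilder>(); });

int main()
{
    std::unique_ptr<Builder> builder = Registry::Instance().Create(1);
    std::cout << (builder ? builder->Name() : "unknown") << std::endl;   // prints: Add
    return 0;
}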
+ */ + +#include "transform.h" + +#include "memory_manager.h" +#include "common/log.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +const uint32_t BIT8_TO_BYTE = 1; +const uint32_t BIT16_TO_BYTE = 2; +const uint32_t BIT32_TO_BYTE = 4; +const uint32_t BIT64_TO_BYTE = 8; + +OH_NN_DeviceType HDIToNN::TransHDIDeviceType(const V1_0::DeviceType& iDeviceType) +{ + switch (iDeviceType) { + case V1_0::DeviceType::CPU: + return OH_NN_CPU; + case V1_0::DeviceType::GPU: + return OH_NN_GPU; + case V1_0::DeviceType::ACCELERATOR: + return OH_NN_ACCELERATOR; + default: + return OH_NN_OTHERS; + } +} + +DeviceStatus HDIToNN::TransHDIDeviceStatus(const V1_0::DeviceStatus& iDeviceStatus) +{ + switch (iDeviceStatus) { + case V1_0::DeviceStatus::AVAILABLE: + return DeviceStatus::AVAILABLE; + case V1_0::DeviceStatus::BUSY: + return DeviceStatus::BUSY; + case V1_0::DeviceStatus::OFFLINE: + return DeviceStatus::OFFLINE; + default: + return DeviceStatus::UNKNOWN; + } +} + +V1_0::PerformanceMode NNToHDI::TransPerformanceMode(const OH_NN_PerformanceMode& mode) +{ + switch (mode) { + case OH_NN_PERFORMANCE_LOW: + return V1_0::PerformanceMode::PERFORMANCE_LOW; + case OH_NN_PERFORMANCE_MEDIUM: + return V1_0::PerformanceMode::PERFORMANCE_MEDIUM; + case OH_NN_PERFORMANCE_HIGH: + return V1_0::PerformanceMode::PERFORMANCE_HIGH; + case OH_NN_PERFORMANCE_EXTREME: + return V1_0::PerformanceMode::PERFORMANCE_EXTREME; + default: + return V1_0::PerformanceMode::PERFORMANCE_NONE; + } +} +V1_0::Priority NNToHDI::TransPriority(const OH_NN_Priority& priority) +{ + switch (priority) { + case OH_NN_PRIORITY_LOW: + return V1_0::Priority::PRIORITY_LOW; + case OH_NN_PRIORITY_MEDIUM: + return V1_0::Priority::PRIORITY_MEDIUM; + case OH_NN_PRIORITY_HIGH: + return V1_0::Priority::PRIORITY_HIGH; + default: + return V1_0::Priority::PRIORITY_NONE; + } +} + +V1_0::DataType NNToHDI::TransDataType(const OH_NN_DataType& dataType) +{ + switch (dataType) { + case OH_NN_BOOL: + return V1_0::DataType::DATA_TYPE_BOOL; + case OH_NN_INT8: + return V1_0::DataType::DATA_TYPE_INT8; + case OH_NN_INT16: + return V1_0::DataType::DATA_TYPE_INT16; + case OH_NN_INT32: + return V1_0::DataType::DATA_TYPE_INT32; + case OH_NN_INT64: + return V1_0::DataType::DATA_TYPE_INT64; + case OH_NN_UINT8: + return V1_0::DataType::DATA_TYPE_UINT8; + case OH_NN_UINT16: + return V1_0::DataType::DATA_TYPE_UINT16; + case OH_NN_UINT32: + return V1_0::DataType::DATA_TYPE_UINT32; + case OH_NN_UINT64: + return V1_0::DataType::DATA_TYPE_UINT64; + case OH_NN_FLOAT16: + return V1_0::DataType::DATA_TYPE_FLOAT16; + case OH_NN_FLOAT32: + return V1_0::DataType::DATA_TYPE_FLOAT32; + case OH_NN_FLOAT64: + return V1_0::DataType::DATA_TYPE_FLOAT64; + default: + return V1_0::DataType::DATA_TYPE_UNKNOWN; + } +} + +V1_0::Format NNToHDI::TransFormat(const OH_NN_Format& format) +{ + switch (format) { + case OH_NN_FORMAT_NCHW: + return V1_0::Format::FORMAT_NCHW; + case OH_NN_FORMAT_NHWC: + return V1_0::Format::FORMAT_NHWC; + default: + return V1_0::Format::FORMAT_NONE; + } +} + +V1_0::IOTensor NNToHDI::TransIOTensor(const IOTensor& tensor) +{ + V1_0::IOTensor iTensor; + iTensor.name = tensor.name; + iTensor.dataType = TransDataType(tensor.dataType); + iTensor.dimensions = tensor.dimensions; + iTensor.format = TransFormat(tensor.format); + + V1_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0}; + if (tensor.data != nullptr) { + auto memManager = MemoryManager::GetInstance(); + Memory memory; + auto ret = memManager->GetMemory(tensor.data, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("Invalid Tensor 
buffer, cannot transform to fd."); + } else { + iBuffer.fd = memory.fd; + iBuffer.bufferSize = memory.length; + iBuffer.offset = 0; + iBuffer.dataSize = memory.length; + } + } + iTensor.data = iBuffer; + + return iTensor; +} + +uint32_t GetTypeSize(OH_NN_DataType type) +{ + switch (type) { + case OH_NN_BOOL: + return sizeof(bool); + case OH_NN_INT8: + case OH_NN_UINT8: + return BIT8_TO_BYTE; + case OH_NN_INT16: + case OH_NN_UINT16: + case OH_NN_FLOAT16: + return BIT16_TO_BYTE; + case OH_NN_INT32: + case OH_NN_UINT32: + case OH_NN_FLOAT32: + return BIT32_TO_BYTE; + case OH_NN_INT64: + case OH_NN_UINT64: + case OH_NN_FLOAT64: + return BIT64_TO_BYTE; + default: + return 0; + } +} + +mindspore::lite::DataType NNToMS::TransformDataType(OH_NN_DataType type) +{ + switch (type) { + case OH_NN_BOOL: + return mindspore::lite::DATA_TYPE_BOOL; + case OH_NN_INT8: + return mindspore::lite::DATA_TYPE_INT8; + case OH_NN_INT16: + return mindspore::lite::DATA_TYPE_INT16; + case OH_NN_INT32: + return mindspore::lite::DATA_TYPE_INT32; + case OH_NN_INT64: + return mindspore::lite::DATA_TYPE_INT64; + case OH_NN_UINT8: + return mindspore::lite::DATA_TYPE_UINT8; + case OH_NN_UINT16: + return mindspore::lite::DATA_TYPE_UINT16; + case OH_NN_UINT32: + return mindspore::lite::DATA_TYPE_UINT32; + case OH_NN_UINT64: + return mindspore::lite::DATA_TYPE_UINT64; + case OH_NN_FLOAT16: + return mindspore::lite::DATA_TYPE_FLOAT16; + case OH_NN_FLOAT32: + return mindspore::lite::DATA_TYPE_FLOAT32; + case OH_NN_FLOAT64: + return mindspore::lite::DATA_TYPE_FLOAT64; + default: + return mindspore::lite::DATA_TYPE_UNKNOWN; + } +} + +mindspore::lite::Format NNToMS::TransformFormat(OH_NN_Format type) +{ + switch (type) { + case OH_NN_FORMAT_NCHW: + return mindspore::lite::FORMAT_NCHW; + case OH_NN_FORMAT_NHWC: + return mindspore::lite::FORMAT_NHWC; + default: + return mindspore::lite::FORMAT_NHWC; + } +} + +mindspore::lite::ActivationType NNToMS::TransfromFusionType(OH_NN_FuseType type) +{ + switch (type) { + case OH_NN_FUSED_NONE: + return mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION; + case OH_NN_FUSED_RELU: + return mindspore::lite::ACTIVATION_TYPE_RELU; + case OH_NN_FUSED_RELU6: + return mindspore::lite::ACTIVATION_TYPE_RELU6; + default: + return mindspore::lite::ACTIVATION_TYPE_UNKNOWN; + } +} + +mindspore::lite::QuantType NNToMS::TransformQuantType(OHOS::NeuralNetworkRuntime::Ops::OpsQuantType type) +{ + switch (type) { + case OHOS::NeuralNetworkRuntime::Ops::OpsQuantType::QUANT_NONE: + return mindspore::lite::QUANT_TYPE_NONE; + case OHOS::NeuralNetworkRuntime::Ops::OpsQuantType::QUANT_ALL: + return mindspore::lite::QUANT_TYPE_ALL; + default: return mindspore::lite::QUANT_TYPE_NONE; + } +} + +mindspore::lite::PadMode NNToMS::TransformPadModeValue(int8_t padMode) +{ + // The value is an optional value of the int8_t type. The value 0 indicates the same, + // and the value 1 indicates valid. + return (padMode == 0) ? 
mindspore::lite::PadMode::PAD_MODE_SAME : + mindspore::lite::PadMode::PAD_MODE_VALID; +} + +OH_NN_DataType MSToNN::TransformDataType(mindspore::lite::DataType type) +{ + switch (type) { + case mindspore::lite::DATA_TYPE_BOOL: + return OH_NN_BOOL; + case mindspore::lite::DATA_TYPE_INT8: + return OH_NN_INT8; + case mindspore::lite::DATA_TYPE_INT16: + return OH_NN_INT16; + case mindspore::lite::DATA_TYPE_INT32: + return OH_NN_INT32; + case mindspore::lite::DATA_TYPE_INT64: + return OH_NN_INT64; + case mindspore::lite::DATA_TYPE_UINT8: + return OH_NN_UINT8; + case mindspore::lite::DATA_TYPE_UINT16: + return OH_NN_UINT16; + case mindspore::lite::DATA_TYPE_UINT32: + return OH_NN_UINT32; + case mindspore::lite::DATA_TYPE_UINT64: + return OH_NN_UINT64; + case mindspore::lite::DATA_TYPE_FLOAT16: + return OH_NN_FLOAT16; + case mindspore::lite::DATA_TYPE_FLOAT32: + return OH_NN_FLOAT32; + case mindspore::lite::DATA_TYPE_FLOAT64: + return OH_NN_FLOAT64; + default: + return OH_NN_UNKNOWN; + } +} + +std::vector MSToNN::TransformQuantParams(std::vector msQuantParams) +{ + std::vector nnQuantParam; + for (const mindspore::lite::QuantParam& param : msQuantParams) { + nnQuantParam.emplace_back((QuantParam){param.numBits, param.scale, param.zeroPoint}); + } + return nnQuantParam; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/transform.h b/frameworks/native/transform.h new file mode 100644 index 0000000..7021611 --- /dev/null +++ b/frameworks/native/transform.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
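Editor's note: transform.cpp above exposes GetTypeSize() so callers can turn an OH_NN_DataType into an element width in bytes. A stand-alone illustration of how that width combines with a tensor's dimensions to size a buffer; the enum here is a local stand-in, not the OH_NN type:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

enum class DataType { BOOL, UINT8, FLOAT16, FLOAT32, INT64 };

// Element width in bytes, mirroring the BITxx_TO_BYTE constants used above.
size_t TypeSize(DataType type)
{
    switch (type) {
        case DataType::BOOL:
        case DataType::UINT8:   return 1;
        case DataType::FLOAT16: return 2;
        case DataType::FLOAT32: return 4;
        case DataType::INT64:   return 8;
    }
    return 0;
}

// Byte size of a dense tensor = product of dimensions * element width.
size_t TensorByteSize(const std::vector<int32_t>& dimensions, DataType type)
{
    size_t count = 1;
    for (int32_t dim : dimensions) {
        count *= static_cast<size_t>(dim);
    }
    return count * TypeSize(type);
}

int main()
{
    std::cout << TensorByteSize({1, 224, 224, 3}, DataType::FLOAT32) << std::endl;   // prints: 602112
    return 0;
}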
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_TRANSFORM_H +#define NEURAL_NETWORK_RUNTIME_TRANSFORM_H + +#include "hdi_interfaces.h" +#include "interfaces/kits/c/neural_network_runtime_type.h" +#include "interfaces/oem/cpp_api/cpp_type.h" +#include "mindir.h" +#include "mindir_types.h" +#include "ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +template +std::vector ConstructVectorFromArray(const T* data, size_t size) +{ + std::vector array; + if (data != nullptr) { + array.assign(data, data + size); + } + return array; +} + +uint32_t GetTypeSize(OH_NN_DataType type); + + +namespace HDIToNN { +OH_NN_DeviceType TransHDIDeviceType(const V1_0::DeviceType& iDeviceType); +DeviceStatus TransHDIDeviceStatus(const V1_0::DeviceStatus& iDeviceStatus); +} // namespace HDIToNN + +namespace NNToHDI { +V1_0::PerformanceMode TransPerformanceMode(const OH_NN_PerformanceMode& mode); +V1_0::Priority TransPriority(const OH_NN_Priority& priority); +V1_0::DataType TransDataType(const OH_NN_DataType& dataType); +V1_0::Format TransFormat(const OH_NN_Format& format); +V1_0::IOTensor TransIOTensor(const IOTensor& tensor); +} // namespace NNToHDI + +namespace NNToMS { +mindspore::lite::DataType TransformDataType(OH_NN_DataType type); +mindspore::lite::Format TransformFormat(OH_NN_Format type); +mindspore::lite::ActivationType TransfromFusionType(OH_NN_FuseType type); +mindspore::lite::QuantType TransformQuantType(OHOS::NeuralNetworkRuntime::Ops::OpsQuantType type); +mindspore::lite::PadMode TransformPadModeValue(int8_t padMode); +} // NNToMS + +namespace MSToNN { +OH_NN_DataType TransformDataType(mindspore::lite::DataType type); +std::vector TransformQuantParams(std::vector msQuantParams); +} // namespace MSToNN +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_TRANSFORM_H \ No newline at end of file diff --git a/frameworks/native/validation.cpp b/frameworks/native/validation.cpp new file mode 100644 index 0000000..719c8be --- /dev/null +++ b/frameworks/native/validation.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
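Editor's note: ConstructVectorFromArray in transform.h above converts the (pointer, size) pairs arriving over the C API into std::vector, returning an empty vector for a null pointer. A usage sketch; the template is restated locally so the snippet compiles on its own:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Local copy of the helper declared in transform.h: an empty vector for nullptr,
// otherwise a copy of the first `size` elements.
template<typename T>
std::vector<T> ConstructVectorFromArray(const T* data, size_t size)
{
    std::vector<T> array;
    if (data != nullptr) {
        array.assign(data, data + size);
    }
    return array;
}

int main()
{
    const int32_t dims[] = {1, 3, 224, 224};
    std::vector<int32_t> dimensions = ConstructVectorFromArray(dims, 4);
    std::vector<int32_t> empty = ConstructVectorFromArray<int32_t>(nullptr, 0);
    std::cout << dimensions.size() << " " << empty.size() << std::endl;   // prints: 4 0
    return 0;
}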
+ */ + +#include "mindir_types.h" + +#include "validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Validation { +bool ValidateTensorDataType(OH_NN_DataType dataType) +{ + if (dataType >= OH_NN_UNKNOWN && dataType <= OH_NN_FLOAT64) { + return true; + } + return false; +} + +bool ValidatePerformanceMode(OH_NN_PerformanceMode performanceMode) +{ + if ((performanceMode >= OH_NN_PERFORMANCE_NONE) && (performanceMode <= OH_NN_PERFORMANCE_EXTREME)) { + return true; + } + return false; +} + +bool ValidatePriority(OH_NN_Priority priority) +{ + if ((priority >= OH_NN_PRIORITY_NONE) && (priority <= OH_NN_PRIORITY_HIGH)) { + return true; + } + return false; +} + +bool ValidateFuseType(OH_NN_FuseType fuseType) +{ + if ((fuseType >= OH_NN_FUSED_NONE) && (fuseType <= OH_NN_FUSED_RELU6)) { + return true; + } + return false; +} + +bool ValidatePadMode(int8_t padMode) +{ + if ((padMode >= mindspore::lite::PAD_MODE_PAD) && (padMode <= mindspore::lite::PAD_MODE_VALID)) { + return true; + } + return false; +} + +bool ValidateTensorType(OH_NN_TensorType nnTensorType) +{ + if ((nnTensorType >= OH_NN_TENSOR) && (nnTensorType <= OH_NN_UNSQUEEZE_AXIS)) { + return true; + } + return false; +} +} // namespace Validation +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/validation.h b/frameworks/native/validation.h new file mode 100644 index 0000000..919d4c4 --- /dev/null +++ b/frameworks/native/validation.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_VALIDATION_H +#define NEURAL_NETWORK_RUNTIME_VALIDATION_H + +#include "common/log.h" +#include "interfaces/kits/c/neural_network_runtime.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Validation { +template +OH_NN_ReturnCode ValidateArray(const T* data, size_t size) +{ + if ((data != nullptr) != (size > 0)) { + LOGE("ValidateArray failed, data is %p but the length is %zu", data, size); + return OH_NN_INVALID_PARAMETER; + } + return OH_NN_SUCCESS; +} + +bool ValidateTensorType(OH_NN_TensorType nnTensorType); +bool ValidateTensorDataType(OH_NN_DataType dataType); +bool ValidatePerformanceMode(OH_NN_PerformanceMode performanceMode); +bool ValidatePriority(OH_NN_Priority priority); +bool ValidateFuseType(OH_NN_FuseType fuseType); +bool ValidatePadMode(int8_t padMode); +} // namespace Validation +} // NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_VALIDATION_H diff --git a/interfaces/innerkits/c/neural_network_runtime_inner.h b/interfaces/innerkits/c/neural_network_runtime_inner.h new file mode 100644 index 0000000..4b298e4 --- /dev/null +++ b/interfaces/innerkits/c/neural_network_runtime_inner.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
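Editor's note: Validation::ValidateArray above accepts a (pointer, length) pair only when the two agree: a non-null pointer with a positive length, or a null pointer with length 0. The same rule, shown stand-alone over the four possible cases:

#include <cstddef>
#include <cstdint>
#include <iostream>

// Same consistency rule as Validation::ValidateArray: a non-null pointer must come
// with a positive size, and a null pointer must come with size 0.
bool IsConsistentArray(const void* data, size_t size)
{
    return (data != nullptr) == (size > 0);
}

int main()
{
    uint32_t indices[3] = {0, 1, 2};
    std::cout << IsConsistentArray(indices, 3) << " "         // 1: real array
              << IsConsistentArray(nullptr, 0) << " "         // 1: empty array
              << IsConsistentArray(indices, 0) << " "         // 0: pointer without length
              << IsConsistentArray(nullptr, 3) << std::endl;  // 0: length without pointer
    return 0;
}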
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_INNER_H +#define NEURAL_NETWORK_RUNTIME_INNER_H + +#include "interfaces/kits/c/neural_network_runtime_type.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief 直接加载LiteGraph,完成模型搭建。 + * + * 调用{@link OH_NNModel_Construct}创建模型实例后,直接调用本方法加载LiteGraph。加载LiteGraph后,只能调用 + * {@link OH_NNCompilation_Construct}创建模型编译器,或者调用{@link OH_NNModel_Destroy}销毁模型实例。\n + * + * 不允许本方法与{@link OH_NNModel_AddTensor}、{@link OH_NNModel_AddOperation}、 + * {@link OH_NNModel_SetTensorData}和{@link OH_NNModel_SpecifyInputsAndOutputs} + * 等构图接口混用,否则返回{@link OH_NN_OPERATION_FORBIDDEN}错误。\n + * + * 如果本方法调用成功,返回{@link OH_NN_SUCCESS},liteGraph将由NNRt管理,调用者无需释放,避免造成二次释放; + * 如果方法返回其他错误码,则NNRt不会持有liteGraph,此时需要调用者主动释放内存。 + * + * + * 本接口不作为Neural Network Runtime接口对外开放。\n + * + * @param model 指向{@link OH_NNModel}实例的指针。 + * @param liteGraph 指向LiteGraph的指针。 + * @return 函数执行的结果状态,执行成功返回OH_NN_SUCCESS,失败返回具体错误码,参考{@link OH_NN_ReturnCode}。 + * @throw std::bad_alloc 本方法可能在转换原始指针到智能指针的过程中,抛出std::bad_alloc异常,此时liteGraph将被 + * 主动释放。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNModel_BuildFromLiteGraph(OH_NNModel *model, const void *liteGraph); + +#ifdef __cplusplus +} +#endif // __cpluscplus +#endif // NEURAL_NETWORK_RUNTIME_INNER_H \ No newline at end of file diff --git a/interfaces/kits/c/neural_network_runtime.h b/interfaces/kits/c/neural_network_runtime.h new file mode 100644 index 0000000..15d8dc7 --- /dev/null +++ b/interfaces/kits/c/neural_network_runtime.h @@ -0,0 +1,686 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
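Editor's note: the inner interface above takes a LiteGraph that was built elsewhere and hands it to the runtime in one call. The sketch below shows the intended call sequence under two assumptions: the headers added by this patch are on the include path, and CompileFromLiteGraph plus the void* liteGraph argument are illustrative placeholders for however the caller obtains a mindspore::lite::LiteGraph. The ownership rule follows the interface comment: on success the runtime owns the graph, on failure the caller still does.

// Sketch only; include paths follow the layout introduced by this patch.
#include "interfaces/innerkits/c/neural_network_runtime_inner.h"
#include "interfaces/kits/c/neural_network_runtime.h"

OH_NNCompilation* CompileFromLiteGraph(void* liteGraph /* mindspore::lite::LiteGraph* */)
{
    OH_NNModel* model = OH_NNModel_Construct();
    if (model == nullptr) {
        return nullptr;
    }

    // On failure the runtime has NOT taken ownership of liteGraph, so the caller
    // remains responsible for releasing it; on success the runtime owns it.
    if (OH_NNModel_BuildFromLiteGraph(model, liteGraph) != OH_NN_SUCCESS) {
        OH_NNModel_Destroy(&model);
        return nullptr;
    }

    // After BuildFromLiteGraph the only legal follow-ups are creating a compilation
    // or destroying the model, per the interface comment above.
    OH_NNCompilation* compilation = OH_NNCompilation_Construct(model);
    OH_NNModel_Destroy(&model);   // the compilation no longer needs the model instance
    return compilation;
}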
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_H +#define NEURAL_NETWORK_RUNTIME_H +/** + * @file neural_network_runtime.h + * + * @brief Neural Network Runtime部件接口定义,通过调用以下接口,在硬件加速器上执行深度学习模型推理计算。 + * + * 注意:Neural Network Runtime的接口目前均不支持多线程调用。\n + * + * @since 9 + * @version 1.0 + */ +#include "neural_network_runtime_type.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup NNModel + * @{ + * + * @brief Neural Network Runtime 构图模块,提供了一系列构图接口实现操作数的添加、算子的添加和输入输出的设置,帮助开发者完成 + * AI模型的构建。 + * + * @since 9 + * @version 1.0 + */ + +/** + * @brief 创建{@link OH_NNModel}类型的模型实例,搭配OH_NNModel模块提供的其他接口,完成模型实例的构造。 + * + * 在开始构图前,先调用{@link OH_NNModel_Construct}创建模型实例,根据模型的拓扑结构,调用 + * {@link OH_NNModel_AddTensor}、{@link OH_NNModel_AddOperation}和 + * {@link OH_NNModel_SetTensorData}方法,填充模型的数据节点和算子节点;然后调用 + * {@link OH_NNModel_SpecifyInputsAndOutputs}指定模型的输入和输出;当构造完模型的拓扑结构,调用 + * {@link OH_NNModel_Finish}完成模型的构建。\n + * + * 模型实例使用完毕后,需要调用{@link OH_NNModel_Destroy}销毁模型实例,避免内存泄漏。\n + * + * @return 返回一个指向{@link OH_NNModel}实例的指针。 + * @since 9 + * @version 1.0 + */ +OH_NNModel *OH_NNModel_Construct(void); + +/** + * @brief 向模型实例中添加操作数 + * + * Neural Network Runtime模型中的数据节点和算子参数均由模型的操作数构成。本方法根据tensor,向model实 + * 例中添加操作数。操作数添加的顺序是模型中记录操作数的索引值,{@link OH_NNModel_SetTensorData}、 + * {@link OH_NNModel_AddOperation}和{@link OH_NNModel_SpecifyInputsAndOutputs} + * 方法根据该索引值,指定不同的操作数。\n + * + * Neural Network Runtime支持动态形状输入和输出。在添加动态形状的数据节点时,需要将tensor.dimensions中支持动态 + * 变化的维度设置为-1。例如:一个4维tensor,将tensor.dimensions设置为[1, -1, 2, 2],表示其第二个维度支持 + * 动态变化。\n + * + * @param model 指向{@link OH_NNModel}实例的指针。 + * @param tensor {@link OH_NN_Tensor}操作数的指针,tensor指定了添加到模型实例中操作数的属性。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNModel_AddTensor(OH_NNModel *model, const OH_NN_Tensor *tensor); + +/** + * @brief 设置操作数的数值 + * + * 对于具有常量值的操作数(如模型的权重),需要在构图阶段使用本方法设置数值。操作数的索引值根据操作数添加进模型的顺序决定,操作数的添加参考 + * {@link OH_NNModel_AddTensor}。\n + * + * @param model 指向{@link OH_NNModel}实例的指针。 + * @param index 操作数的索引值。 + * @param dataBuffer 指向真实数据的指针。 + * @param length 数据缓冲区的长度。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNModel_SetTensorData(OH_NNModel *model, uint32_t index, const void *dataBuffer, size_t length); + +/** + * @brief 向模型实例中添加算子 + * + * 本方法向模型实例中添加算子,算子类型由op指定,算子的参数、输入和输出由paramIndices、inputIndices和 + * outputIndices指定。本方法将对算子参数的属性和输入输出的数量进行校验,这些属性需要在调用 + * {@link OH_NNModel_AddTensor}添加操作数的时候正确设置。每个算子期望的参数、输入和输出属性请参考 + * {@link OH_NN_OperationType}。\n + * + * paramIndices、inputIndices和outputIndices中存储的是操作数的索引值,每个索引值根据操作数添加进模型的顺序决定,正确 + * 设置并添加算子要求准确设置每个操作数的索引值。操作数的添加参考{@link OH_NNModel_AddTensor}。\n + * + * 如果添加算子时,添加了额外的参数(非算子需要的参数),本方法返回{@link OH_NN_INVALID_PARAMETER};如果没有设置算子参数, + * 则算子按默认值设置缺省的参数,默认值请参考{@link OH_NN_OperationType}。\n + * + * @param model 指向{@link OH_NNModel}实例的指针。 + * @param op 指定添加的算子类型,取值请参考{@link OH_NN_OperationType}的枚举值。 + * @param paramIndices OH_NN_UInt32Array实例的指针,设置算子的参数。 + * @param inputIndices OH_NN_UInt32Array实例的指针,指定算子的输入。 + * @param outputIndices OH_NN_UInt32Array实例的指针,设置算子的输出。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNModel_AddOperation(OH_NNModel *model, + OH_NN_OperationType op, + const OH_NN_UInt32Array *paramIndices, + const OH_NN_UInt32Array *inputIndices, + const OH_NN_UInt32Array 
*outputIndices); + +/** + * @brief 指定模型的输入输出 + * + * 模型实例需要指定操作数作为端到端的输入和输出,设置为输入和输出的操作数不能使用{@link OH_NNModel_SetTensorData}设置 + * 数值,需要在执行阶段调用OH_NNExecutor的方法设置输入、输出数据。\n + * + * 操作数的索引值根据操作数添加进模型的顺序决定,操作数的添加参考 + * {@link OH_NNModel_AddTensor}。\n + * + * 暂时不支持异步设置模型输入输出。\n + * + * @param model 指向{@link OH_NNModel}实例的指针。 + * @param inputIndices OH_NN_UInt32Array实例的指针,指定算子的输入。 + * @param outputIndices OH_NN_UInt32Array实例的指针,指定算子的输出。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNModel_SpecifyInputsAndOutputs(OH_NNModel *model, + const OH_NN_UInt32Array *inputIndices, + const OH_NN_UInt32Array *outputIndices); + +/** + * @brief 完成模型构图 + * + * 完成模型拓扑结构的搭建后,调用本方法指示构图已完成。在调用本方法后,无法进行额外的构图操作,调用 + * {@link OH_NNModel_AddTensor}、{@link OH_NNModel_AddOperation}、 + * {@link OH_NNModel_SetTensorData}和 + * {@link OH_NNModel_SpecifyInputsAndOutputs}将返回 + * {@link OH_NN_OPERATION_FORBIDDEN}。\n + * + * 在调用{@link OH_NNModel_GetAvailableOperations}和{@link OH_NNCompilation_Construct} + * 之前,必须先调用本方法完成构图。\n + * + * @param model 指向{@link OH_NNModel}实例的指针。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNModel_Finish(OH_NNModel *model); + +/** + * @brief 释放模型实例。 + * + * 调用{@link OH_NNModel_Construct}创建的模型实例需要调用本方法主动释放,否则将造成内存泄漏。\n + * + * 如果model为空指针或者*model为空指针,本方法只打印warning日志,不执行释放逻辑。\n + * + * @param model 指向{@link OH_NNModel}实例的二级指针。模型实例销毁后,本方法将*model主动设置为空指针。 + * @since 9 + * @version 1.0 + */ +void OH_NNModel_Destroy(OH_NNModel **model); + +/** + * @brief 查询硬件对模型内所有算子的支持情况,通过布尔值序列指示支持情况。 + * + * 查询底层硬件对模型实例内每个算子的支持情况,硬件由deviceID指定,结果将通过isSupported指向的数组表示。如果支持第i个算子,则 + * (*isSupported)[i] == true,否则为 false。\n + * + * 本方法成功执行后,(*isSupported)将指向记录算子支持情况的bool数组,数组长度和模型实例的算子数量相等。该数组对应的内存由 + * Neural Network Runtime管理,在模型实例销毁或再次调用本方法后自动销毁。\n + * + * @param model 指向{@link OH_NNModel}实例的指针。 + * @param deviceID 指定查询的硬件ID,通过{@link OH_NNDevice_GetAllDevicesID}获取。 + * @param isSupported 指向bool数组的指针。调用本方法时,要求(*isSupported)为空指针,否则返回 + * {@link OH_NN_INVALID_PARAMETER}。 + * @param opCount 模型实例中算子的数量,对应(*isSupported)数组的长度。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNModel_GetAvailableOperations(OH_NNModel *model, + size_t deviceID, + const bool **isSupported, + uint32_t *opCount); +/** @} */ + +/** + * @addtogroup NNCompilation + * @{ + * + * @brief Neural Network Runtime 编译模块 + * + * @since 9 + * @version 1.0 + */ + +/** + * @brief 创建{@link OH_NNCompilation}类型的编译实例 + * + * 使用OH_NNModel模块完成模型的构造后,借助OH_NNCompilation模块提供的接口,将模型传递到底层硬件完成编译。本方法接受一个 + * {@link OH_NNModel}实例,创建出{@link OH_NNCompilation}实例;通过 + * {@link OH_NNCompilation_SetDevice}方法,设置编译的设备,最后调用 + * {@link OH_NNCompilation_Build}完成编译。\n + * + * 除了计算硬件的选择,OH_NNCompilation模块支持模型缓存、性能偏好、优先级设置、float16计算等特性,参考以下方法: + * - {@link OH_NNCompilation_SetCache} + * - {@link OH_NNCompilation_SetPerformanceMode} + * - {@link OH_NNCompilation_SetPriority} + * - {@link OH_NNCompilation_EnableFloat16}\n + * + * 调用本方法创建{@link OH_NNCompilation}后,{@link OH_NNModel}实例可以释放。\n + * + * @param model 指向{@link OH_NNModel}实例的指针。 + * @return 返回一个指向{@link OH_NNCompilation}实例的指针。 + * @since 9 + * @version 1.0 + */ +OH_NNCompilation *OH_NNCompilation_Construct(const OH_NNModel *model); + +/** + * @brief 指定模型编译和计算的硬件。 + * + * 编译阶段,需要指定模型编译和执行计算的硬件设备。先调用{@link OH_NNDevice_GetAllDevicesID}获取可用的设备ID, + * 
通过{@link OH_NNDevice_GetType}和{@link OH_NNDevice_GetType}获取设备信息后,将期望编译执行的 + * 设备ID传入本方法进行设置。\n + * + * @param compilation 指向{@link OH_NNCompilation}实例的指针。 + * @param deviceID 指定的硬件ID。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNCompilation_SetDevice(OH_NNCompilation *compilation, size_t deviceID); + +/** + * @brief 设置编译后的模型缓存路径和缓存版本。 + * + * 在支持缓存的硬件上,模型在硬件驱动层编译后可以保存为缓存文件,下次编译时直接从缓存文件读取模型,减少重新编译的耗时。本方法接受缓存路径和版本,根据缓存 + * 路径中和版本的不同情况,本方法采取不同的行为:\n + * + * - 缓存路径指定的目录下没有文件: + * 将编译后的模型缓存到目录下,设置缓存版本等于version。\n + * + * - 缓存路径指定的目录下存在完整的缓存文件,且版本号 == version: + * 读取路径下的缓存文件,传递到底层硬件中转换为可以执行的模型实例。\n + * + * - 缓存路径指定的目录下存在完整的缓存文件,但版本号 < version: + * 路径下的缓存文件需要更新,模型在底层硬件完成编译后,覆写路径下的缓存文件,将版本号更新为version。\n + * + * - 缓存路径指定的目录下存在完整的缓存文件,但版本号 > version: + * 路径下的缓存文件版本高于version,不读取缓存文件,同时返回{@link OH_NN_INVALID_PARAMETER}错误码。\n + * + * - 缓存路径指定的目录下的缓存文件不完整或没有缓存文件的访问权限: + * 返回{@link OH_NN_INVALID_FILE}错误码。\n + * + * - 缓存目录不存在,或者没有访问权限: + * 返回{@link OH_NN_INVALID_PATH}错误码。\n + * + * @param compilation 指向{@link OH_NNCompilation}实例的指针。 + * @param cachePath 模型缓存文件目录,本方法在cachePath目录下为不同的硬件创建缓存目录。建议每个模型使用单独的缓存目录。 + * @param version 缓存版本。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNCompilation_SetCache(OH_NNCompilation *compilation, const char *cachePath, uint32_t version); + +/** + * @brief 设置模型计算的性能模式。 + * + * Neural Network Runtime 支持为模型计算设置性能模式,满足低功耗到极致性能的需求。如果编译阶段没有调用本方法设置性能模式, + * 编译实例为模型默认分配{@link OH_NN_PERFORMANCE_NONE}模式。在{@link OH_NN_PERFORMANCE_NONE} + * 模式下,硬件按默认的性能模式执行计算。\n + * + * 在不支持性能模式设置的硬件上调用本方法,将返回{@link OH_NN_UNAVALIDABLE_DEVICE}错误码。\n + * + * @param compilation 指向{@link OH_NNCompilation}实例的指针。 + * @param performanceMode 指定性能模式,可选的性能模式参考{@link OH_NN_PerformanceMode}。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNCompilation_SetPerformanceMode(OH_NNCompilation *compilation, + OH_NN_PerformanceMode performanceMode); + +/** + * @brief 设置模型计算的优先级。 + * + * Neural Network Runtime 支持为模型设置计算优先级,优先级仅作用于相同uid进程创建的模型,不同uid进程、不同设备的优先级不会 + * 相互影响。\n + * + * 在不支持优先级设置的硬件上调用本方法,将返回{@link OH_NN_UNAVALIDABLE_DEVICE}错误码。\n + * + * @param compilation 指向{@link OH_NNCompilation}实例的指针。 + * @param priority 指定优先级,可选的优先级参考{@link OH_NN_Priority}。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNCompilation_SetPriority(OH_NNCompilation *compilation, OH_NN_Priority priority); + +/** + * @brief 是否以float16的浮点数精度计算。 + * + * Neural Network Runtime目前仅支持构造float32浮点模型和int8量化模型。在支持float16精度的硬件上调用本方法, + * float32浮点数精度的模型将以float16的精度执行计算,以减少内存占用和执行时间。\n + * + * 在不支持float16精度计算的硬件上调用本方法,将返回{@link OH_NN_UNAVALIDABLE_DEVICE}错误码。\n + * + * @param compilation 指向{@link OH_NNCompilation}实例的指针。 + * @param enableFloat16 Float16低精度计算标志位。设置为true时,执行Float16推理;设置为false时,执行float32推理。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNCompilation_EnableFloat16(OH_NNCompilation *compilation, bool enableFloat16); + +/** + * @brief 进行模型编译 + * + * 完成编译配置后,调用本方法指示模型编译已完成。编译实例将模型和编译选项推送至硬件设备进行编译。在调用本方法后,无法进行额外的编译操作,调用 + * {@link OH_NNCompilation_SetDevice}、{@link OH_NNCompilation_SetCache}、 + * {@link OH_NNCompilation_SetPerformanceMode}、 + * {@link 
OH_NNCompilation_SetPriority}和{@link OH_NNCompilation_EnableFloat16} + * 方法将返回{@link OH_NN_OPERATION_FORBIDDEN}。\n + * + * @param compilation 指向{@link OH_NNCompilation}实例的指针。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNCompilation_Build(OH_NNCompilation *compilation); + +/** + * @brief 释放Compilation对象。 + * + * 调用{@link OH_NNCompilation_Construct}创建的编译实例需要调用本方法主动释放,否则将造成内存泄漏。\n + * + * 如果compilation为空指针或者*compilation为空指针,本方法只打印warning日志,不执行释放逻辑。\n + * + * @param compilation 指向{@link OH_NNCompilation}实例的二级指针。编译实例销毁后,本方法将*compilation主动设置为空指针。 + * @since 9 + * @version 1.0 + */ +void OH_NNCompilation_Destroy(OH_NNCompilation **compilation); +/** @} */ + +/** + * @addtogroup NNExecutor + * @{ + * + * @brief Neural Network Runtime 执行模块 + * + * @since 9 + * @version 1.0 + */ + +/** + * @brief 创建{@link OH_NNExecutor}类型的执行器实例 + * + * 本方法接受一个编译器,构造一个与硬件关联的模型推理执行器。通过{@link OH_NNExecutor_SetInput}设置模型输入数据, + * 设置输入数据后,调用{@link OH_NNExecutor_Run}方法执行推理,最后通过 + * {@link OH_NNExecutor_GetOutput}获取计算结果。\n + * + * 调用本方法创建{@link OH_NNExecutor}实例后,如果不需要创建其他执行器,可以安全释放{@link OH_NNCompilation}实例。\n + * + * @param compilation 指向{@link OH_NNCompilation}实例的指针。 + * @return 返回指向{@link OH_NNExecutor}实例的指针。 + * @since 9 + * @version 1.0 + */ +OH_NNExecutor *OH_NNExecutor_Construct(OH_NNCompilation *compilation); + +/** + * @brief 设置模型单个输入的数据。 + * + * 本方法将dataBuffer中,长度为length个字节的数据,拷贝到底层硬件的共享内存。inputIndex指定设置的输入,tensor用于设置输入的 + * 形状、类型、量化参数等信息。\n + * + * 由于Neural Network Runtime支持动态输入形状的模型,在固定形状输入和动态形状输入的场景下,本方法采取不同的处理策略: + * + * - 固定形状输入的场景:tensor各属性必须和构图阶段调用{@link OH_NNModel_AddTensor}添加的操作数保持一致; + * - 动态形状输入的场景:在构图阶段,由于动态输入的形状不确定,调用本方法时,要求tensor.dimensions中的每个值必须大于0, + * 以确定执行计算阶段输入的形状。设置形状时,只允许调整数值为-1的维度。假设在构图阶段,输入A的维度为 + * [-1, 224, 224, 3],调用本方法时,只能调整第一个维度的尺寸,如:[3, 224, 224, 3]。调整其他维度将返回 + * {@link OH_NN_INVALID_PARAMETER}。\n + * + * inputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输入的顺序, + * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,inputIndices为{1,2,3}, + * 则在执行阶段,三个输入的索引值分别为{0, 1, 2}。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @param inputIndex 输入的索引值。 + * @param tensor 设置输入数据对应的操作数。 + * @param dataBuffer 指向输入数据的指针。 + * @param length 数据缓冲区的字节长度。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNExecutor_SetInput(OH_NNExecutor *executor, + uint32_t inputIndex, + const OH_NN_Tensor *tensor, + const void *dataBuffer, + size_t length); + +/** + * @brief 设置模型单个输出的缓冲区。 + * + * 本方法将dataBuffer指向的缓冲区与outputIndex指定的输出绑定,缓冲区的长度由length指定。\n + * + * 调用{@link OH_NNExecutor_Run}完成单次模型推理后,Neural Network Runtime将比对dataBuffer指向的缓冲区与 + * 输出数据的长度,根据不同情况,返回不同结果:\n + * + * - 如果缓冲区大于或等于数据长度:则推理后的结果将拷贝至缓冲区,并返回{@link OH_NN_SUCCESS},可以通过访问dataBuffer读取推理结果。 + * - 如果缓冲区小于数据长度:则{@link OH_NNExecutor_Run}将返回{@link OH_NN_INVALID_PARAMETER}, + * 并输出日志告知缓冲区太小的信息。\n + * + * outputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输出的顺序, + * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,outputIndices为{4, 5, 6}, + * 则在执行阶段,三个输出的索引值分别为{0, 1, 2}。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @param outputIndex 输出的索引值。 + * @param dataBuffer 指向输出数据的指针。 + * @param length 数据缓冲区的字节长度。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNExecutor_SetOutput(OH_NNExecutor *executor, + 
uint32_t outputIndex, + void *dataBuffer, + size_t length); + +/** + * @brief 获取输出tensor的维度信息。 + * + * 调用{@link OH_NNExecutor_Run}完成单次推理后,本方法获取指定输出的维度信息和维数。在动态形状输入、输出的场景中常用。\n + * + * outputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输出的顺序, + * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,outputIndices为{4, 5, 6}, + * 则在执行阶段,三个输出的索引值分别为{0, 1, 2}。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @param outputIndex 输出的索引值。 + * @param shape 指向int32_t数组的指针,数组中的每个元素值,是输出tensor在每个维度上的长度。 + * @param length uint32_t类型的指针,返回输出的维数。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNExecutor_GetOutputShape(OH_NNExecutor *executor, + uint32_t outputIndex, + int32_t **shape, + uint32_t *shapeLength); + +/** + * @brief 执行推理。 + * + * 在执行器关联的硬件上,执行模型的端到端推理计算。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNExecutor_Run(OH_NNExecutor *executor); + +/** + * @brief 在硬件上为单个输入申请共享内存。 + * + * Neural Network Runtime 提供主动申请硬件共享内存的方法。通过指定执行器和输入索引值,本方法在单个输入关联的硬件 + * 上,申请大小为length的共享内存,通过{@link OH_NN_Memory}实例返回。\n + * + * inputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输入的顺序, + * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,inputIndices为{1,2,3}, + * 则在执行阶段,三个输入的索引值分别为{0, 1, 2}。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @param inputIndex 输入的索引值。 + * @param length 申请的内存字节。 + * @return 指向{@link OH_NN_Memory}实例的指针。 + * @since 9 + * @version 1.0 + */ +OH_NN_Memory *OH_NNExecutor_AllocateInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, size_t length); + +/** + * @brief 在硬件上为单个输出申请共享内存。 + * + * Neural Network Runtime 提供主动申请硬件共享内存的方法。通过指定执行器和输出索引值,本方法在单个输出关联的硬件 + * 上,申请大小为length的共享内存,通过{@link OH_NN_Memory}实例返回。\n + * + * outputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输出的顺序, + * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,outputIndices为{4, 5, 6}, + * 则在执行阶段,三个输出的索引值分别为{0, 1, 2}。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @param outputIndex 输出的索引值。 + * @param length 申请的内存字节。 + * @return 指向{@link OH_NN_Memory}实例的指针。 + * @since 9 + * @version 1.0 + */ +OH_NN_Memory *OH_NNExecutor_AllocateOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, size_t length); + +/** + * @brief 释放{@link OH_NN_Memory}实例指向的输入内存。 + * + * 调用{@link OH_NNExecutor_AllocateInputMemory}创建的内存实例,需要主动调用本方法进行释放,否则将造成内存泄漏。 + * inputIndex和memory的对应关系需要和创建内存实例时保持一致。\n + * + * 如果memory或*memory为空指针,本方法只打印warning日志,不执行释放逻辑。\n + * + * inputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输入的顺序, + * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,inputIndices为{1,2,3}, + * 则在执行阶段,三个输入的索引值分别为{0, 1, 2}。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @param inputIndex 输入的索引值。 + * @param memory 指向{@link OH_NN_Memory}实例的二级指针。共享内存销毁后,本方法将*memory主动设置为空指针。 + * @since 9 + * @version 1.0 + */ +void OH_NNExecutor_DestroyInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, OH_NN_Memory **memory); + +/** + * @brief 释放{@link OH_NN_Memory}实例指向的输出内存。 + * + * 调用{@link OH_NNExecutor_AllocateOutputMemory}创建的内存实例,需要主动调用本方法进行释放,否则将造成内存泄漏。 + * outputIndex和memory的对应关系需要和创建内存实例时保持一致。\n + * + * 如果memory或*memory为空指针,本方法只打印warning日志,不执行释放逻辑。\n + * + * outputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输出的顺序, + * 依次加一。假设调用{@link 
OH_NNModel_SpecifyInputsAndOutputs}时,outputIndices为{4, 5, 6}, + * 则在执行阶段,三个输出的索引值分别为{0, 1, 2}。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @param outputIndex 输出的索引值。 + * @param memory 指向{@link OH_NN_Memory}实例的二级指针。共享内存销毁后,本方法将*memory主动设置为空指针。 + * @since 9 + * @version 1.0 + */ +void OH_NNExecutor_DestroyOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, OH_NN_Memory **memory); + +/** + * @brief 将{@link OH_NN_Memory}实例指向的硬件共享内存,指定为单个输入使用的共享内存。 + * + * 在需要自行管理内存的场景下,本方法将执行输入和{@link OH_NN_Memory}内存实例绑定。执行计算时,底层硬件从内存实例指向的共享内存中读取 + * 输入数据。通过本方法,可以实现设置输入、执行计算、读取输出的并发执行,提升数据流的推理效率。\n + * + * inputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输入的顺序, + * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,inputIndices为{1,2,3}, + * 则在执行阶段,三个输入的索引值分别为{0, 1, 2}。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的指针。 + * @param inputIndex 输入的索引值。 + * @param tensor 指向{@link OH_NN_Tensor}的指针,设置单个输入所对应的操作数。 + * @param memory 指向{@link OH_NN_Memory}的指针。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNExecutor_SetInputWithMemory(OH_NNExecutor *executor, + uint32_t inputIndex, + const OH_NN_Tensor *tensor, + const OH_NN_Memory *memory); + +/** + * @brief 将{@link OH_NN_Memory}实例指向的硬件共享内存,指定为单个输出使用的共享内存。 + * + * 在需要自行管理内存的场景下,本方法将执行输出和{@link OH_NN_Memory}内存实例绑定。执行计算时,底层硬件将计算结果直接写入内存实例指向 + * 的共享内存。通过本方法,可以实现设置输入、执行计算、读取输出的并发执行,提升数据流的推理效率。\n + * + * outputIndex的值,从0开始,根据调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,指定输出的顺序, + * 依次加一。假设调用{@link OH_NNModel_SpecifyInputsAndOutputs}时,outputIndices为{4, 5, 6}, + * 则在执行阶段,三个输出的索引值分别为{0, 1, 2}。\n + * + * @param executor 执行器。 + * @param outputIndex 输出的索引值。 + * @param memory 指向{@link OH_NN_Memory}的指针。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNExecutor_SetOutputWithMemory(OH_NNExecutor *executor, + uint32_t outputIndex, + const OH_NN_Memory *memory); + +/** + * @brief 销毁执行器实例,释放执行器占用的内存。 + * + * 调用{@link OH_NNExecutor_Construct}创建的执行器实例需要调用本方法主动释放,否则将造成内存泄漏。\n + * + * 如果executor为空指针或者*executor为空指针,本方法只打印warning日志,不执行释放逻辑。\n + * + * @param executor 指向{@link OH_NNExecutor}实例的二级指针。 + * @since 9 + * @version 1.0 + */ +void OH_NNExecutor_Destroy(OH_NNExecutor **executor); +/** @} */ + +/** + * @addtogroup NNDevice + * @{ + * + * @brief Neural Network Runtime 设备管理模块 + * + * @since 9 + * @version 1.0 + */ + +/** + * @brief 获取对接到 Neural Network Runtime 的硬件ID。 + * + * 每个硬件在 Neural Network Runtime 中存在唯一且固定ID,本方法通过uin32_t数组返回当前设备上已经对接的硬件ID。\n + * + * 硬件ID通过size_t数组返回,数组的每个元素是单个硬件的ID值。数组内存由Neural Network Runtime管理。在下次调用本方法前, + * 数据指针有效。\n + * + * @param allDevicesID 指向size_t数组的指针。要求传入的(*allDevicesID)为空指针,否则返回 + * {@link OH_NN_INVALID_PARAMETER}。 + * @param deviceCount uint32_t类型的指针,用于返回(*allDevicesID)的长度。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNDevice_GetAllDevicesID(const size_t **allDevicesID, uint32_t *deviceCount); + +/** + * @brief 获取指定硬件的类型信息。 + * + * 通过deviceID指定计算硬件,获取硬件的名称。硬件ID需要调用{@link OH_NNDevice_GetAllDevicesID}获取。\n + * + * @param deviceID 指定硬件ID。 + * @param name 指向char数组的指针,要求传入的(*char)为空指针,否则返回 + * {@link OH_NN_INVALID_PARAMETER}。(*name)以C风格字符串保存硬件名称,数组以\0结尾。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNDevice_GetName(size_t 
deviceID, const char **name); + +/** + * @brief 获取指定硬件的类别信息。 + * + * 通过deviceID指定计算硬件,获取硬件的类别。目前 Neural Network Runtime 支持的设备类型有: + * - CPU设备:OH_NN_CPU + * - GPU设备:OH_NN_GPU + * - 机器学习专用加速器:OH_NN_ACCELERATOR + * - 不属于以上类型的其他硬件类型:OH_NN_OTHERS\n + * + * @param deviceID 指定硬件ID。 + * @param deviceType 指向{@link OH_NN_DeviceType}实例的指针,返回硬件的类别信息。 + * @return 函数执行的结果状态。执行成功返回OH_NN_SUCCESS;失败返回具体错误码,具体失败错误码可参考{@link OH_NN_ReturnCode}。 + * @since 9 + * @version 1.0 + */ +OH_NN_ReturnCode OH_NNDevice_GetType(size_t deviceID, OH_NN_DeviceType *deviceType); +/** @} */ + +#ifdef __cplusplus +} +#endif // __cplusplus +#endif // NEURAL_NETWORK_RUNTIME_H diff --git a/interfaces/kits/c/neural_network_runtime_type.h b/interfaces/kits/c/neural_network_runtime_type.h new file mode 100644 index 0000000..73f9320 --- /dev/null +++ b/interfaces/kits/c/neural_network_runtime_type.h @@ -0,0 +1,1632 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_TYPE_H +#define NEURAL_NETWORK_RUNTIME_TYPE_H +/** + * @file neural_network_runtime_type.h + * + * @brief Neural Network Runtime定义的结构体和枚举值。 + * + * @since 9 + * @version 1.0 + */ +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Neural Network Runtime的模型句柄 + * + * @since 9 + * @version 1.0 + */ +typedef struct OH_NNModel OH_NNModel; + +/** + * @brief Neural Network Runtime的编译器句柄 + * + * @since 9 + * @version 1.0 + */ +typedef struct OH_NNCompilation OH_NNCompilation; + +/** + * @brief Neural Network Runtime的执行器句柄 + * + * @since 9 + * @version 1.0 + */ +typedef struct OH_NNExecutor OH_NNExecutor; + +/** + * @brief 硬件的执行性能模式 + * + * @since 9 + * @version 1.0 + */ +typedef enum { + /** 无性能模式偏好 */ + OH_NN_PERFORMANCE_NONE = 0, + /** 低能耗模式 */ + OH_NN_PERFORMANCE_LOW = 1, + /** 中性能模式 */ + OH_NN_PERFORMANCE_MEDIUM = 2, + /** 高性能模式 */ + OH_NN_PERFORMANCE_HIGH = 3, + /** 极致性能模式 */ + OH_NN_PERFORMANCE_EXTREME = 4 +} OH_NN_PerformanceMode; + +/** + * @brief 模型推理任务优先级 + * + * @since 9 + * @version 1.0 + */ +typedef enum { + /** 无优先级偏好 */ + OH_NN_PRIORITY_NONE = 0, + /** 低优先级 */ + OH_NN_PRIORITY_LOW = 1, + /** 中优先级 */ + OH_NN_PRIORITY_MEDIUM = 2, + /** 高优先级 */ + OH_NN_PRIORITY_HIGH = 3 +} OH_NN_Priority; + +/** + * @brief Neural Network Runtime 定义的错误码类型 + * + * @since 9 + * @version 1.0 + */ +typedef enum { + /** 操作成功 */ + OH_NN_SUCCESS = 0, + /** 操作失败 */ + OH_NN_FAILED = 1, + /** 非法参数 */ + OH_NN_INVALID_PARAMETER = 2, + /** 内存相关的错误,包括:内存不足、内存数据拷贝失败、内存申请失败等。 */ + OH_NN_MEMORY_ERROR = 3, + /** 非法操作 */ + OH_NN_OPERATION_FORBIDDEN = 4, + /** 空指针异常 */ + OH_NN_NULL_PTR = 5, + /** 无效文件 */ + OH_NN_INVALID_FILE = 6, + /** 硬件发生错误,错误可能包含:HDL服务崩溃 */ + OH_NN_UNAVALIDABLE_DEVICE = 7, + /** 非法路径 */ + OH_NN_INVALID_PATH = 8 +} OH_NN_ReturnCode; + +/** + * @brief Neural Network Runtime 融合算子中激活函数的类型 + * + * @since 9 + * @version 1.0 + */ +typedef enum : int8_t { + /** 未指定融合激活函数 */ + OH_NN_FUSED_NONE = 0, + /** 融合relu激活函数 */ + OH_NN_FUSED_RELU = 1, + /** 融合relu6激活函数 */ + 
OH_NN_FUSED_RELU6 = 2 +} OH_NN_FuseType; + +/** + * @brief tensor数据的排布类型 + * + * @since 9 + * @version 1.0 + */ +typedef enum { + /** 当tensor没有特定的排布类型时(如标量或矢量),使用{@link OH_NN_FORMAT_NONE} */ + OH_NN_FORMAT_NONE = 0, + /** 读取(使用)维度信息时按照NCHW读取(使用)*/ + OH_NN_FORMAT_NCHW = 1, + /** 读取(使用)维度信息时按照NHWC读取(使用) */ + OH_NN_FORMAT_NHWC = 2 +} OH_NN_Format; + +/** + * @brief Neural Network Runtime 支持的设备类型 + * + * @since 9 + * @version 1.0 + */ +typedef enum { + /** 不属于CPU、GPU、专用加速器的设备 */ + OH_NN_OTHERS = 0, + /** CPU设备 */ + OH_NN_CPU = 1, + /** GPU设备 */ + OH_NN_GPU = 2, + /** 专用硬件加速器 */ + OH_NN_ACCELERATOR = 3, +} OH_NN_DeviceType; + +/** + * @brief Neural Network Runtime 支持的数据类型 + * + * @since 9 + * @version 1.0 + */ +typedef enum { + /** 操作数数据类型未知 */ + OH_NN_UNKNOWN = 0, + /** 操作数数据类型为bool */ + OH_NN_BOOL = 1, + /** 操作数数据类型为int8 */ + OH_NN_INT8 = 2, + /** 操作数数据类型为int16 */ + OH_NN_INT16 = 3, + /** 操作数数据类型为int32 */ + OH_NN_INT32 = 4, + /** 操作数数据类型为int64 */ + OH_NN_INT64 = 5, + /** 操作数数据类型为uint8 */ + OH_NN_UINT8 = 6, + /** 操作数数据类型为uint16 */ + OH_NN_UINT16 = 7, + /** 操作数数据类型为uint32 */ + OH_NN_UINT32 = 8, + /** 操作数数据类型为uint64 */ + OH_NN_UINT64 = 9, + /** 操作数数据类型为float16 */ + OH_NN_FLOAT16 = 10, + /** 操作数数据类型为float32 */ + OH_NN_FLOAT32 = 11, + /** 操作数数据类型为float64 */ + OH_NN_FLOAT64 = 12 +} OH_NN_DataType; + + +/** + * @brief Neural Network Runtime 支持算子的类型 + * + * @since 9 + * @version 1.0 + */ +typedef enum { + /** + * 返回两个输入张量对应元素相加的和的张量。 + * + * 输入: + * + * * x,第一个输入的张量,数据类型要求为布尔值或者数字。 + * * y,第二个输入的张量,数据类型和形状需要和第一个输入保持一致。 + * + * 参数: + * + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * 0 输出x和y的和,数据形状与输入broadcast之后一样,数据类型与较高精度的输入精度一致 + */ + OH_NN_OPS_ADD = 1, + + /** + * 在输入tensor上应用 2D 平均池化,仅支持NHWC格式的tensor。支持int8量化输入。 + * + * 如果输入中含有padMode参数: + * + * 输入: + * + * * x,一个张量。 + * + * 参数: + * + * * kernelSize,用来取平均值的kernel大小,是一个长度为2的int数组[kernel_height,kernel_weight], + * 第一个数表示kernel高度,第二个数表示kernel宽度。 + * * strides,kernel移动的距离,是一个长度为2的int数组[stride_height,stride_weight], + * 第一个数表示高度上的移动步幅,第二个数表示宽度上的移动步幅。 + * * padMode,填充模式,int类型的可选值,0表示same,1表示valid,并且以最近邻的值填充。 + * same,输出的高度和宽度与x相同,填充总数将在水平和垂直方向计算,并在可能的情况下均匀分布到顶部 + * 和底部、左侧和右侧。否则,最后一个额外的填充将从底部和右侧完成。 + * valid,输出的可能最大高度和宽度将在不填充的情况下返回。额外的像素将被丢弃。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 如果输入中含有padList参数: + * + * 输入: + * + * * x,一个张量。 + * + * 参数: + * + * * kernelSize,用来取平均值的kernel大小,是一个长度为2的int数组[kernel_height,kernel_weight], + * 第一个数表示kernel高度,第二个数表示kernel宽度。 + * * strides,kernel移动的距离,是一个长度为2的int数组[stride_height,stride_weight], + * 第一个数表示高度上的移动步幅,第二个数表示宽度上的移动步幅。 + * * padList,输入x周围的填充,是一个长度为4的int数组[top,bottom,left,right],并且以最近邻的值填充。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * 输出x平均池化后的张量。 + */ + OH_NN_OPS_AVG_POOL = 2, + + /** + * 对一个tensor进行batch normalization,对tensor元素进行缩放和位移,缓解一批数据中潜在的covariate shift。 + * + * 输入: + * + * * x,一个n维的tensor,要求形状为[N,...,C],即第n维是通道数(channel)。 + * * scale,缩放因子的1D张量,用于缩放归一化的第一个张量。 + * * offset,用于偏移的1D张量,以移动到归一化的第一个张量。 + * * mean,总体均值的一维张量,仅用于推理;对于训练,必须为空。 + * * variance,用于总体方差的一维张量。仅用于推理;对于训练,必须为空。 + * + * 参数: + * + * * epsilon,数值稳定性的小附加值。 + * + * 输出: + * + * * 输出张量,形状和数据类型与输入x一致。 + */ + OH_NN_OPS_BATCH_NORM = 3, + + /** + * 将一个4维tensor的batch维度按block_shape切分成小块,并将这些小块拼接到空间维度。 + * + * 参数: + * + * * x,输入张量,维将被切分,拼接回空间维度。 + * + * 输出: + * + * * blockSize,一个长度为2的数组[height_block,weight_block],指定切分到空间维度上的block大小。 + * * crops,一个shape为(2,2)的2维数组[[crop0_start,crop0_end],[crop1_start,crop1_end]], + * 表示在output的空间维度上截掉部分元素。 + 
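
下面给出一个最小示意,按下文 OH_NN_OPS_BATCH_TO_SPACE_ND 的输出形状公式推导输出形状,便于核对 blockShape 与 crops 的作用;其中 BatchToSpaceShape 是为说明而假设的辅助函数,并非 Neural Network Runtime 提供的接口。

```cpp
#include <array>
#include <cstdint>

// 按下文 OH_NN_OPS_BATCH_TO_SPACE_ND 的形状公式,由输入形状、blockShape 和 crops 推导输出形状。
std::array<int32_t, 4> BatchToSpaceShape(const std::array<int32_t, 4>& in,                    // {n, h, w, c}
                                         const std::array<int32_t, 2>& block,                 // {blockHeight, blockWidth}
                                         const std::array<std::array<int32_t, 2>, 2>& crops)  // {{crop0_start, crop0_end}, {crop1_start, crop1_end}}
{
    return {
        in[0] / (block[0] * block[1]),                 // n' = n / (block[0] * block[1])
        in[1] * block[0] - crops[0][0] - crops[0][1],  // h' = h * block[0] - crops[0][0] - crops[0][1]
        in[2] * block[1] - crops[1][0] - crops[1][1],  // w' = w * block[1] - crops[1][0] - crops[1][1]
        in[3]                                          // c' = c
    };
}
// 例如输入形状 {4, 2, 2, 1}、blockShape = {2, 2}、crops 全为 0 时,输出形状为 {1, 4, 4, 1}。
```
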
* + * 输出: + * + * * 输出张量,假设x的形状为(n,h,w,c),output的形状为(n',h',w',c'): + * n' = n / (block_shape[0] * block_shape[1]) + * h' = h * block_shape[0] - crops[0][0] - crops[0][1] + * w' = w * block_shape[1] - crops[1][0] - crops[1][1] + * c'= c + */ + OH_NN_OPS_BATCH_TO_SPACE_ND = 4, + + /** + * 对给出的输入张量上的各个维度方向上的数据进行偏置。 + * + * 输入: + * + * * x,输入张量,可为2-5维度。 + * * bias,参数对应输入维度数量的偏移值。 + * + * 输出: + * + * * 输出张量,根据输入中每个维度方向偏移后的结果。 + */ + OH_NN_OPS_BIAS_ADD = 5, + + /** + * 对输入张量中的数据类型进行转换。 + * + * 输入: + * + * * x,输入张量。 + * * type,输入转换目的的数据类型。 + * + * 输出: + * + * * 输出张量,输出转换为目的数据类型后的张量。 + */ + OH_NN_OPS_CAST = 6, + + /** + * 在指定轴上连接张量,将输入张量按给定的轴连接起来。 + * + * 输入: + * + * * x:N个输入张量。 + * + * 参数: + * + * * axis,指定轴的位置。 + * + * 输出: + * + * * 输出n个张量按axis轴连接的结果。 + */ + OH_NN_OPS_CONCAT = 7, + + /** + * 二维卷积层。 + * + * 如果输入中含有padMode参数: + * + * 输入: + * + * * x,输入张量。 + * * weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,inChannel/group], + * inChannel必须要能整除group。 + * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化 + * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。 + * + * 参数: + * + * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。 + * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth], + * 值必须大于或等于1,并且不能超过x的height和width。 + * * padMode,x的填充模式,支持same和valid,int类型,0表示same,1表示valid。 + * same,输出的高度和宽度与x相同,填充总数将在水平和垂直方向计算,并在可能的情况下均匀分布到顶部和底部、左侧 + * 和右侧。否则,最后一个额外的填充将从底部和右侧完成。 + * Valid,输出的可能最大高度和宽度将在不填充的情况下返回。额外的像素将被丢弃。 + * * group,将输入x按in_channel分组,int类型。group等于1,这是常规卷积;group大于1且小于或等于in_channel,这是分组卷积。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * + * 如果输入中含有padList参数: + * + * 输入: + * + * * x,输入张量。 + * * weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,inChannel/group], + * inChannel必须要能整除group。 + * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化 + * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。 + * + * 参数: + * + * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。 + * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth]。 + * 值必须大于或等于1,并且不能超过x的height和width。 + * * padList,输入x周围的填充,是一个长度为4的int数组[top,bottom,left,right]。 + * * group,将输入x按in_channel分组,int类型。 + * group等于1,这是常规卷积。 + * group等于in_channel,这是depthwiseConv2d,此时group==in_channel==out_channel。 + * group大于1且小于in_channel,这是分组卷积,out_channel==group。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * 输出张量,卷积的输出。 + */ + OH_NN_OPS_CONV2D = 8, + + /** + * 二维卷积转置。 + * + * 如果输入中含有padMode参数: + * + * 输入: + * + * * x,输入张量。 + * * weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,inChannel/group], + * inChannel必须要能整除group。 + * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化 + * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。 + * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。 + * + * 参数: + * + * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth]。 + * 值必须大于或等于1,并且不能超过x的height和width。 + * * padMode,x的填充模式,支持same和valid,int类型,0表示same,1表示valid。 + * same,输出的高度和宽度与x相同,填充总数将在水平和垂直方向计算,并在可能的情况下均匀分布到顶部和底部、左侧 + * 和右侧。否则,最后一个额外的填充将从底部和右侧完成。 + * Valid,输出的可能最大高度和宽度将在不填充的情况下返回。额外的像素将被丢弃。 + * * group,将输入x按in_channel分组,int类型。group等于1,这是常规卷积;group大于1且小于或等于in_channel,这是分组卷积。 + * * outputPads,一个整数或元组/2 个整数的列表,指定沿输出张量的高度和宽度的填充量。可以是单个整数,用于为所 + * 有空间维度指定相同的值。沿给定维度的输出填充量必须小于沿同一维度的步幅。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 如果输入中含有padList参数: + * + * 输入: + * + * * x,输入张量。 + * * 
weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,inChannel/group], + * inChannel必须要能整除group。 + * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化 + * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。 + * + * 参数: + * + * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。 + * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth]。 + * 值必须大于或等于1,并且不能超过x的height和width。 + * * padList,输入x周围的填充,是一个长度为4的int数组[top,bottom,left,right]。 + * * group,将输入x按in_channel分组,int类型。group等于1,这是常规卷积;group大于1且小于或等于in_channel,这是分组卷积。 + * * outputPads,一个整数或元组/2 个整数的列表,指定沿输出张量的高度和宽度的填充量。可以是单个整数,用于为所 + * 有空间维度指定相同的值。沿给定维度的输出填充量必须小于沿同一维度的步幅。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * 输出张量,卷积转置后的输出。 + */ + OH_NN_OPS_CONV2D_TRANSPOSE = 9, + + /** + * 2维深度可分离卷积 + * + * 如果输入中含有padMode参数: + * + * 输入: + * + * * x,输入张量。 + * * weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,1],outChannel = channelMultiplier x inChannel。 + * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化 + * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。 + * + * 参数: + * + * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。 + * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth]。 + * 值必须大于或等于1,并且不能超过x的height和width。 + * * padMode,x的填充模式,支持same和valid,int类型,0表示same,1表示valid + * same,输出的高度和宽度与x相同,填充总数将在水平和垂直方向计算,并在可能的情况下均匀分布到顶部和底部、左侧 + * 和右侧。否则,最后一个额外的填充将从底部和右侧完成 + * Valid,输出的可能最大高度和宽度将在不填充的情况下返回。额外的像素将被丢弃 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 如果输入中含有padList 参数: + * + * 输入: + * + * * x,输入张量。 + * * weight,卷积的权重,要求weight排布为[outChannel,kernelHeight,kernelWidth,1],outChannel = channelMultiplier x inChannel。 + * * bias,卷积的偏置,是长度为[outChannel]的数组。在量化场景下,bias 参数不需要量化参数,其量化 + * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。 + * + * 参数: + * + * * stride,卷积核在height和weight上的步幅,是一个长度为2的int数组[strideHeight,strideWidth]。 + * * dilation,表示扩张卷积在height和weight上的扩张率,是一个长度为2的int数组[dilationHeight,dilationWidth]。 + * 值必须大于或等于1,并且不能超过x的height和width。 + * * padList,输入x周围的填充,是一个长度为4的int数组[top,bottom,left,right]。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * 输出张量,卷积后的输出。 + */ + OH_NN_OPS_DEPTHWISE_CONV2D_NATIVE = 10, + + /** + * 对输入的两个标量或张量做商。 + * + * 输入: + * + * * x1,第一个输入是标量或布尔值或数据类型为数字或布尔值的张量。 + * * x2,数据类型根据x1的类型,要求有所不同: + * 当第一个输入是张量时,第二个输入可以是实数或布尔值或数据类型为实数/布尔值的张量。 + * 当第一个输入是实数或布尔值时,第二个输入必须是数据类型为实数/布尔值的张量。 + * + * 参数: + * + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * 输出张量,输出两输入相除后的结果。 + */ + OH_NN_OPS_DIV = 11, + + /** + * 设置参数对输入进行product(点乘)、sum(相加减)或max(取大值)。 + * + * 输入: + * + * * x1,第一个输入张量。 + * * x2,第二个输入张量。 + * + * 参数: + * + * * mode,枚举,选择操作方式。 + * + * 输出: + * + * * 输出tensor,与x1有相同的数据类型和形状。 + * + */ + OH_NN_OPS_ELTWISE = 12, + + /** + * 在给定轴上为tensor添加一个额外的维度。 + * + * 输入: + * + * * x,输入张量。 + * * axis,需要添加的维度的index,int32_t类型,值必须在[-dim-1,dim],且只允许常量值。 + * + * 输出: + * + * * 输出tensor,与x有相同的数据类型和形状。 + */ + OH_NN_OPS_EXPAND_DIMS = 13, + + /** + * 根据指定的维度,创建由一个标量填充的张量。 + * + * 输入: + * + * * value,填充的标量。 + * * shape,指定创建张量的维度。 + * + * 输出: + * + * * 输出张量,与value有相同的数据类型,shape由输入指定。 + */ + OH_NN_OPS_FILL = 14, + + /** + * 全连接,整个输入作为feature map,进行特征提取。 + * + * 输入: + * + * * x,全连接的输入张量。 + * * weight,全连接的权重张量。 + * * bias,全连接的偏置,在量化场景下,bias 参数不需要量化参数,其量化 + * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。 + * + * 参数: + * + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + 
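
二维卷积与深度可分离卷积算子中,stride、dilation 与 padList 共同决定输出的空间尺寸。下面给出一个最小示意,按常规卷积(padList 显式填充)的通用尺寸关系帮助理解这些参数;该公式并非本文件给出,CalcConvOutputSize 也只是为说明而假设的辅助函数。

```cpp
#include <cstdint>

// 常规二维卷积在单个空间维度上的输出尺寸(padList 显式填充的情形,示意):
//   out = (in + padBegin + padEnd - ((kernel - 1) * dilation + 1)) / stride + 1
int32_t CalcConvOutputSize(int32_t in, int32_t kernel, int32_t stride,
                           int32_t dilation, int32_t padBegin, int32_t padEnd)
{
    int32_t effectiveKernel = (kernel - 1) * dilation + 1;  // 扩张(空洞)后的等效 kernel 尺寸
    return (in + padBegin + padEnd - effectiveKernel) / stride + 1;
}
// 例如输入高度 224、kernel 3、stride 2、dilation 1、padList 上下各为 1 时,
// 输出高度为 (224 + 2 - 3) / 2 + 1 = 112。
```
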
* 输出: + * + * * output,输出运算后的张量。 + + * 如果输入中含有axis参数: + * + * 输入: + * + * * x,全连接的输入张量。 + * * weight,全连接的权重张量。 + * * bias,全连接的偏置,在量化场景下,bias 参数不需要量化参数,其量化 + * 版本要求输入 OH_NN_INT32 类型数据,实际量化参数由 x 和 weight 共同决定。 + * + * 参数: + * + * * axis,x做全连接的轴,从指定轴axis开始,将axis和axis后面的轴展开成1维去做全连接。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * output,输出运算后的张量。 + */ + OH_NN_OPS_FULL_CONNECTION = 15, + + /** + * 根据指定的索引和轴返回输入tensor的切片。 + * + * 输入: + * + * * x,输入待切片的tensor。 + * * inputIndices,指定输入x在axis上的索引,是一个int类型的数组,值必须在[0,x.shape[axis])范围内 + * * axis,输入x被切片的轴,int32_t类型的数组,数组长度为1。 + * + * 输出: + * + * * Output,输出切片后的tensor。 + */ + OH_NN_OPS_GATHER = 16, + + /** + * 计算输入的Hswish激活值。 + * + * 输入: + * + * * 一个n维输入tensor。 + * + * 输出: + * + * * n维Hswish激活值,数据类型和shape和input一致。 + */ + OH_NN_OPS_HSWISH = 17, + + /** + * 对输入x1和x2,计算每对元素的x<=y的结果。 + * + * 输入: + * + * * x1,可以是实数、布尔值或数据类型是实数/NN_BOOL的tensor。 + * * x2,如果input_x是tensor,input_y可以是实数、布尔值,否则只能是tensor,其数据类型是实数或NN_BOOL。 + * + * 输出: + * + * * Tensor,数据类型为NN_BOOL的tensor,使用量化模型时,output的量化参数不可省略,但量化参数的数值不会对输入结果产生影响。 + */ + OH_NN_OPS_LESS_EQUAL = 18, + + /** + * 计算x1和x2的内积 + * + * 输入: + * + * * x1,n维输入tensor。 + * * x2,n维输入tensor。 + * + * 参数: + * + * * TransposeX,布尔值,是否对x1进行转置。 + * * TransposeY,布尔值,是否对x2进行转置。 + * + * 输出: + * + * * output,计算得到内积,当type!=NN_UNKNOWN时,output数据类型由type决定;当type==NN_UNKNOWN时, + * output的数据类型取决于inputX和inputY进行计算时转化的数据类型。 + */ + OH_NN_OPS_MATMUL = 19, + + /** + * 计算input1和input2对应元素最大值,input1和input2的输入遵守隐式类型转换规则,使数据类型一致。输入必须 + * 是两个张量或一个张量和一个标量。当输入是两个张量时,它们的数据类型不能同时为NN_BOOL。它们的形状支持 + * broadcast成相同的大小。当输入是一个张量和一个标量时,标量只能是一个常数。 + * + * 输入: + * + * * x1,n维输入tensor,实数或NN_BOOL类型。 + * * x2,n维输入tensor,实数或NN_BOOL类型。 + * + * 输出: + * + * * output,n维输出tensor,output的shape和数据类型和两个input中精度或者位数高的相同。 + */ + OH_NN_OPS_MAXIMUM = 20, + + /** + * 在输入tensor上应用 2D 最大值池化。 + * + * 如果输入中含有padMode参数: + * + * 输入: + * + * * x,一个张量。 + * + * 参数: + * + * * kernelSize,用来取最大值的kernel大小,是一个长度为2的int数组[kernel_height,kernel_weight], + * 第一个数表示kernel高度,第二个数表示kernel宽度。 + * * strides,kernel移动的距离,是一个长度为2的int数组[stride_height,stride_weight], + * 第一个数表示高度上的移动步幅,第二个数表示宽度上的移动步幅。 + * * padMode,填充模式,int类型的可选值,0表示same,1表示valid,并且以最近邻的值填充。 + * same,输出的高度和宽度与x相同,填充总数将在水平和垂直方向计算,并在可能的情况下均匀分布到顶部 + * 和底部、左侧和右侧。否则,最后一个额外的填充将从底部和右侧完成。 + * valid,输出的可能最大高度和宽度将在不填充的情况下返回。额外的像素将被丢弃。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 如果输入中含有padList参数: + * + * 输入: + * + * * x,一个张量。 + * + * 参数: + * + * * kernelSize,用来取最大值的kernel大小,是一个长度为2的int数组[kernel_height,kernel_weight], + * 第一个数表示kernel高度,第二个数表示kernel宽度。 + * * strides,kernel移动的距离,是一个长度为2的int数组[stride_height,stride_weight], + * 第一个数表示高度上的移动步幅,第二个数表示宽度上的移动步幅。 + * * padList,输入x周围的填充,是一个长度为4的int数组[top,bottom,left,right],并且以最近邻的值填充。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * output,输出x最大值池化后的张量。 + */ + OH_NN_OPS_MAX_POOL = 21, + + /** + * 将inputX和inputY相同的位置的元素相乘得到output。如果inputX和inputY类型shape不同,要求inputX和inputY可以 + * 通过broadcast扩充成相同的shape进行相乘。 + * + * 输入: + * + * * x1,一个n维tensor。 + * * x2,一个n维tensor。 + * + * 参数: + * + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * output,x1和x2每个元素的乘积。 + */ + OH_NN_OPS_MUL = 22, + + /** + * 根据indices指定的位置,生成一个由one-hot向量构成的tensor。每个onehot向量中的有效值由on_value决定,其他位置由off_value决定。 + * + * 输入: + * + * * indices,n维tensor。indices中每个元素决定每个one-hot向量,on_value的位置 + * * depth,一个整型标量,决定one-hot向量的深度。要求depth>0。 + * * on_value,一个标量,指定one-hot向量中的有效值。 + * * off_value,(一个标量,指定one-hot向量中除有效位以外,其他位置的值。 + * 
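
结合下文对 axis 参数的说明,下面给出一个最小示意,演示 1 维 indices、axis 为 -1 时 one-hot 的展开方式;OneHot1D 是为说明而假设的辅助函数,并非 Neural Network Runtime 提供的接口。

```cpp
#include <cstdint>
#include <vector>

// 对 1 维 indices 生成 one-hot 结果(对应 axis = -1 的情形),输出形状为 [indices.size(), depth]。
std::vector<std::vector<float>> OneHot1D(const std::vector<int32_t>& indices, int32_t depth,
                                         float onValue, float offValue)
{
    std::vector<std::vector<float>> output(indices.size(), std::vector<float>(depth, offValue));
    for (size_t i = 0; i < indices.size(); ++i) {
        if (indices[i] >= 0 && indices[i] < depth) {
            output[i][static_cast<size_t>(indices[i])] = onValue;  // 有效位置填 on_value,其余位置保持 off_value
        }
    }
    return output;
}
// 例如 indices = {1, 0, 2}、depth = 3、on_value = 1、off_value = 0 时,
// 输出为 {{0, 1, 0}, {1, 0, 0}, {0, 0, 1}},形状由 [3] 变为 [3, 3]。
```
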
+ * 参数: + * + * * axis,一个整型标量,指定插入one-hot的维度。 + * indices的形状是[N,C],depth的值是D,当axis=0时,output形状为[D,N,C], + * indices的形状是[N,C],depth的值是D,当axis=-1时,output形状为[N,C,D], + * indices的形状是[N,C],depth的值是D,当axis=1时,output形状为[N,D,C]。 + * + * 输出: + * + * * output,如果indices时n维tensor,则output是(n+1)维tensor。output的形状由indices和axis共同决定。 + */ + OH_NN_OPS_ONE_HOT = 23, + + /** + * 在inputX指定维度的数据前后,添加指定数值进行增广。 + * + * 输入: + * + * * inputX,一个n维tensor,要求inputX的排布为[BatchSize,…]。 + * * paddings,一个2维tensor,指定每一维度增补的长度,shape为[n,2]。paddings[i][0]表示第i维上,需要在inputX前增补的数量; + * paddings[i][1]表示第i维上,需要在inputX后增补的数量。 + * + * 参数: + * + * * padValues,一个常数,数据类型和inputX一致,指定Pad操作补全的数值。 + * + * 输出: + * + * * output,一个n维tensor,维数和数据类型和inputX保持一致。shape由inputX和paddings共同决定 + * output.shape[i] = input.shape[i] + paddings[i][0]+paddings[i][1]。 + */ + OH_NN_OPS_PAD = 24, + + /** + * 求x的y次幂,输入必须是两个tensor或一个tensor和一个标量。当输入是两个tensor时,它们的数据类型不能同时为NN_BOOL, + * 且要求两个tensor的shape相同。当输入是一个tensor和一个标量时,标量只能是一个常数。 + * + * 输入: + * + * * x,实数、bool值或tensor,tensor的数据类型为实数/NN_BOOL。 + * * y,实数、bool值或tensor,tensor的数据类型为实数/NN_BOOL。 + * + * 输出: + * + * * output,形状由x和y broadcast后的形状决定。 + */ + OH_NN_OPS_POW = 25, + + /** + * 给定一个tensor,计算其缩放后的值。 + * + * 输入: + * + * * x,一个n维tensor。 + * * scale,缩放tensor。 + * * bias,偏置tensor。 + * + * 参数: + * + * * axis,指定缩放的维度。 + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * output,scale的计算结果,一个n维tensor,类型和input一致,shape由axis决定。 + */ + OH_NN_OPS_SCALE = 26, + + /** + * 输入一个tensor,计算其shape。 + * + * 输入: + * + * * x,一个n维tensor。 + * + * 输出: + * + * * output,输出tensor的维度,一个整型数组。 + */ + OH_NN_OPS_SHAPE = 27, + + /** + * 给定一个tensor,计算其sigmoid结果。 + * + * 输入: + * + * * input,一个n维tensor。 + * + * 输出: + * + * * output,sigmoid的计算结果,一个n维tensor,类型和shape和input一致。 + */ + OH_NN_OPS_SIGMOID = 28, + + /** + * 在input tensor各维度,以begin为起点,截取size长度的切片。 + * + * 输入: + * + * * x,n维输入tensor。 + * * begin,一组不小于0的整数,指定每个维度上的起始切分点。 + * * size,一组不小于1的整数,指定每个维度上切片的长度。假设某一维度i,1<=size[i]<=input.shape[i]-begin[i]。 + * + * 输出: + * + * * output,切片得到的n维tensor,其TensorType和input一致,shape和size相同。 + */ + OH_NN_OPS_SLICE = 29, + + /** + * 给定一个tensor,计算其softmax结果。 + * + * 输入: + * + * * x,n维输入tensor。 + * + * 参数: + * + * * axis,int64类型,指定计算softmax的维度。整数取值范围为[-n,n)。 + * + * 输出: + * + * * output,softmax的计算结果,一个n维tensor,类型和shape和x一致。 + */ + OH_NN_OPS_SOFTMAX = 30, + + /** + * 将4维tensor在空间维度上进行切分成blockShape[0] * blockShape[1]个小块,然后在batch维度上拼接这些小块。 + * + * 输入: + * + * * x,一个4维tensor + * + * 参数: + * + * * blockShape,一对整数,每个整数不小于1。 + * * paddings,一对数组,每个数组由两个整数组成。组成paddings的4个整数都不小于0。paddings[0][0]和paddings[0][1]指 + * 定了第三个维度上padding的数量,paddings[1][0]和paddings[1][1]指定了第四个维度上padding的数量。 + * + * 输出: + * + * * output,一个4维tensor,数据类型和input一致。shape由input,blockShape和paddings共同决定,假设input shape为[n,c,h,w],则有 + * output.shape[0] = n * blockShape[0] * blockShape[1] + * output.shape[1] = c + * output.shape[2] = (h + paddings[0][0] + paddings[0][1]) / blockShape[0] + * output.shape[3] = (w + paddings[1][0] + paddings[1][1]) / blockShape[1] + * 要求(h + paddings[0][0] + paddings[0][1])和(w + paddings[1][0] + paddings[1][1])能被 + * blockShape[0]和blockShape[1]整除。 + */ + OH_NN_OPS_SPACE_TO_BATCH_ND = 31, + + /** + * Split 算子沿 axis 维度将 input 拆分成多个 tensor,tensor 数量由 outputNum 指定。 + * + * 输入: + * + * * x,n维tensor。 + * + * 参数: + * + * * outputNum,long,输出tensor的数量,output_num类型为int。 + * * size_splits,1维tensor,指定 tensor 沿 axis 轴拆分后,每个 tensor 的大小,size_splits 类型为 int。 + * 如果 size_splits 的数据为空,则 tensor 被拆分成大小均等的 tensor,此时要求 input.shape[axis] 可以被 outputNum 整除; + * 如果 
size_splits 不为空,则要求 size_splits 所有元素之和等于 input.shape[axis]。 + * * axis,指定拆分的维度,axis类型为int。 + * + * 输出: + * + * * outputs,一组n维tensor,每一个tensor类型和shape相同,每个tensor的类型和input一致。 + */ + OH_NN_OPS_SPLIT = 32, + + /** + * 给定一个tensor,计算其平方根。 + * + * 输入: + * + * * x,一个n维tensor。 + * + * 输出: + * + * * output,输入的平方根,一个n维tensor,类型和shape和input一致。 + */ + OH_NN_OPS_SQRT = 33, + + /** + * 计算两个输入的差值并返回差值的平方。SquaredDifference算子支持tensor和tensor相减。 + * 如果两个tensor的TensorType不相同,Sub算子会将低精度的tensor转成更高精度的类型。 + * 如果两个tensor的shape不同,要求两个tensor可以通过broadcast拓展成相同shape的tensor。 + * + * 输入: + * + * * x,被减数,inputX是一个tensor,tensor的类型可以是NN_FLOAT16、NN_FLOAT32、NN_INT32或NN_BOOL。 + * * y,减数,inputY是一个tensor,tensor的类型可以是NN_FLOAT16、NN_FLOAT32、NN_INT32或NN_BOOL。 + * + * 输出: + * + * * output,两个input差值的平方。output的shape由inputX和inputY共同决定,inputX和inputY的shape相同时, + * output的shape和inputX、inputY相同;shape不同时,需要将inputX或inputY做broadcast操作后,相减得到output。 + * output的TensorType由两个输入中更高精度的TensorType决定。 + */ + OH_NN_OPS_SQUARED_DIFFERENCE = 34, + + /** + * 去除axis中,长度为1的维度。支持int8量化输入假设input的shape为[2,1,1,2,2],axis为[0,1], + * 则output的shape为[2,1,2,2]。第0维到第1维之间,长度为0的维度被去除。 + * + * 输入: + * + * * x,n维tensor。 + * + * 参数: + * + * * axis,指定删除的维度。axis可以是一个int64_t的整数或数组,整数的取值范围为[-n,n)。 + * + * 输出: + * + * * output,输出tensor。 + */ + OH_NN_OPS_SQUEEZE = 35, + + /** + * 将一组tensor沿axis维度进行堆叠,堆叠前每个tensor的维数为n,则堆叠后output维数为n+1。 + * + * 输入: + * + * * x,Stack支持传入多个输入n维tensor,每个tensor要求shape相同且类型相同。 + * + * 参数: + * + * * axis,一个整数,指定tensor堆叠的维度。axis可以是负数,axis取值范围为[-(n+1),(n+1))。 + * + * 输出: + * + * * output,将input沿axis维度堆叠的输出,n+1维tensor,TensorType和input相同。 + */ + OH_NN_OPS_STACK = 36, + + /** + * 跨步截取Tensor + * + * 输入: + * + * * x,n维输入tensor。 + * * begin,1维tensor,begin的长度等于n,begin[i]指定第i维上截取的起点。 + * * end,1维tensor,end的长度等于n,end[i]指定第i维上截取的终点。 + * * strides,1维tensor,strides的长度等于n,strides[i]指定第i维上截取的步长。 + * + * 参数: + * + * * beginMask,一个整数,用于解除begin的限制。将beginMask转成二进制表示,如果binary(beginMask)[i]==1, + * 则对于第i维,从第一个元素开始,以strides[i]为步长截取元素直到第end[i]-1个元素。 + * * endMask,个整数,用于解除end的限制。将endMask转成二进制表示,如果binary(endMask)[i]==1,则对于第i维, + * 从第begin[i]个元素起,以strides[i]为步长截取元素直到tensor边界。 + * * ellipsisMask,一个整数,用于解除begin和end的限制。将ellipsisMask转成二进制表示,如果binary(ellipsisMask)[i]==1, + * 则对于第i维,从第一个元素开始,以strides[i]为补偿,截取元素直到tensor边界。binary(ellipsisMask)仅允许有一位不为0。 + * * newAxisMask,一个整数,用于新增维度。将newAxisMask转成二进制表示,如果binary(newAxisMask)[i]==1,则在第i维插入长度为1的新维度。 + * * shrinkAxisMask,一个整数,用于压缩指定维度。将shrinkAxisMask转成二进制表示,如果binary(shrinkAxisMask)[i]==1, + * 则舍去第i维所有元素,第i维长度压缩至1。 + * + * 输出: + * + * * 堆叠运算后的Tensor,数据类型与x相同。输出维度rank(x[0])+1 维。 + */ + OH_NN_OPS_STRIDED_SLICE = 37, + + /** + * 计算两个输入的差值。 + * + * 输入: + * + * * x,被减数,x是一个tensor。 + * * y,减数,y是一个tensor。 + * + * 参数: + * + * * activationType,是一个整型常量,且必须是FuseType中含有的值。 + * 在输出之前调用指定的激活。 + * + * 输出: + * + * * output,两个input相减的差。output的shape由inputX和inputY共同决定,inputX和inputY的shape相同时,output的shape和inputX、inputY相同; + * shape不同时,需要将inputX或inputY做broadcast操作后,相减得到output。output的TensorType由两个输入中更高精度的TensorType决定。 + */ + OH_NN_OPS_SUB = 38, + + /** + * 计算输入tensor的双曲正切值。 + * + * 输入: + * + * * x,n维tensor。 + * + * 输出: + * + * * output,input的双曲正切,TensorType和tensor shape和input相同。 + */ + OH_NN_OPS_TANH = 39, + + /** + * 以multiples指定的次数拷贝input。 + * + * 输入: + * * x,n维tensor。 + * * multiples,1维tensor,指定各个维度拷贝的次数。其长度m不小于input的维数n。 + * + * 输出: + * * Tensor,m维tensor,TensorType与input相同。如果input和multiples长度相同, + * 则output和input维数一致,都是n维tensor;如果multiples长度大于n,则用1填充input的维度, + * 再在各个维度上拷贝相应的次数,得到m维tensor。 + */ + OH_NN_OPS_TILE = 40, + + /** + * 根据permutation对input 0进行数据重排。 + * 
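
下文说明 Transpose 输出的 shape 由输入 shape 与 perm 共同决定。下面给出一个最小示意演示该形状重排规则;TransposeShape 是为说明而假设的辅助函数,并非 Neural Network Runtime 提供的接口。

```cpp
#include <cstdint>
#include <vector>

// Transpose 的形状重排规则(示意):output.shape[i] = input.shape[perm[i]]。
std::vector<int32_t> TransposeShape(const std::vector<int32_t>& inputShape,
                                    const std::vector<int32_t>& perm)
{
    std::vector<int32_t> outputShape(perm.size());
    for (size_t i = 0; i < perm.size(); ++i) {
        outputShape[i] = inputShape[static_cast<size_t>(perm[i])];
    }
    return outputShape;
}
// 例如 NHWC 形状 {1, 224, 224, 3} 配合 perm = {0, 3, 1, 2},得到 NCHW 形状 {1, 3, 224, 224}。
```
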
+ * 输入: + * + * * x,n维tensor,待重排的tensor。 + * * perm,1维tensor,其长度和input 0的维数一致。 + * + * 输出: + * + * * output,n维tensor,output 0的TensorType与input 0相同,shape由input 0的shape和permutation共同决定。 + */ + OH_NN_OPS_TRANSPOSE = 41, + + /** + * keepDims为false时,计算指定维度上的平均值,减少input的维数;当keepDims为true时,计算指定维度上的平均值,保留相应的维度。 + * + * 输入: + * + * * input,n维输入tensor,n<8。 + * * axis,1维tensor,指定计算均值的维度,axis中每个元素的取值范围为[-n,n)。 + * + * 参数: + * + * * keepDims,布尔值,是否保留维度的标志位。 + * + * 输出: + * + * * output,m维输出tensor,数据类型和input相同。当keepDims为false时,m==n;当keepDims为true时,m=2,则要求inputX的排布为[BatchSize,…,Channels],第二个维度为通道数。 + * * weight,一个1维tensor。weight的长度只能是1或者等于通道数。当weight长度为1,则inputX所有通道共享一个权重值。 + * 若weight长度等于通道数,每个通道独享一个权重,若inputX维数n<2,weight长度只能为1。 + * 输出: + * + * output,x的PReLU激活值。形状和数据类型和inputX保持一致。 + */ + OH_NN_OPS_PRELU = 46, + + /** + * 计算input的Relu激活值。 + * + * 输入: + * + * * input,一个n维输入tensor。 + * + * 输出: + * + * * output,n维Relu输出tensor,数据类型和shape和input一致。 + */ + OH_NN_OPS_RELU = 47, + + /** + * 计算input的Relu6激活值,即对input中每个元素x,计算min(max(x,0),6)。 + * + * 输入: + * + * * input,一个n维输入tensor。 + * + * 输出: + * + * * output,n维Relu6输出tensor,数据类型和shape和input一致。 + */ + OH_NN_OPS_RELU6 = 48, + + /** + * 对一个tensor从某一axis开始做层归一化。 + * + * 输入: + * + * * input,一个n维输入tensor。 + * * gamma,一个m维tensor,gamma维度应该与input做归一化部分的shape一致。 + * * beta,一个m维tensor,shape与gamma一样。 + * + * 参数: + * + * * beginAxis,是一个NN_INT32的标量,指定开始做归一化的轴,取值范围是[1,rank(input))。 + * * epsilon,是一个NN_FLOAT32的标量,是归一化公式中的微小量,常用值是1e-7。 + * + * 输出: + * + * * output,n维输出tensor,数据类型和shape和input一致。 + */ + OH_NN_OPS_LAYER_NORM = 49, + + /** + * 当keepDims为false时,过乘以维度中的所有元素来减小张量的维度,减少input的维数;当keepDims为true时,过乘以维度中的所有元素来减小张量的维度,保留相应的维度。 + * + * 输入: + * + * * input,n维输入tensor,n<8。 + * * axis,1维tensor,指定计算乘的维度,axis中每个元素的取值范围为[-n,n)。 + * + * 参数: + * + * * keepDims,布尔值,是否保留维度的标志位。 + * + * 输出: + * + * * output,m维输出tensor,数据类型和input相同。当keepDims为false时,m==n;当keepDims为true时,m +#include +#include + +#include "interfaces/kits/c/neural_network_runtime_type.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +// ALLOCATE_BUFFER_LIMIT is 1 Gb +const size_t ALLOCATE_BUFFER_LIMIT = 1024 * 1024 * 1024; +enum DeviceStatus: int { + UNKNOWN, + AVAILABLE, + BUSY, + OFFLINE +}; + +struct ModelConfig { + bool enableFloat16; + OH_NN_PerformanceMode mode; + OH_NN_Priority priority; +}; + +struct ModelBuffer { + void* buffer; + size_t length; +}; + +struct QuantParam { + uint32_t numBits; + double scale; + int32_t zeroPoint; +}; + +struct IOTensor { + std::string name; + OH_NN_DataType dataType; + OH_NN_Format format; + std::vector dimensions; + void* data; + size_t length; +}; +} // NeuralNetworkRuntime +} // OHOS + +#endif // NEURAL_NETWORK_RUNTIME_OEM_CPP_API_TYPE_H \ No newline at end of file diff --git a/interfaces/oem/cpp_api/device.h b/interfaces/oem/cpp_api/device.h new file mode 100644 index 0000000..93415e4 --- /dev/null +++ b/interfaces/oem/cpp_api/device.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_DEVICE_H +#define NEURAL_NETWORK_RUNTIME_DEVICE_H + +#include +#include +#include + +#include "interfaces/kits/c/neural_network_runtime_type.h" +#include "cpp_type.h" +#include "prepared_model.h" +#include "mindir.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +class Device { +public: + Device() = default; + virtual ~Device() = default; + + virtual OH_NN_ReturnCode GetDeviceName(std::string& name) = 0; + virtual OH_NN_ReturnCode GetVendorName(std::string& name) = 0; + virtual OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) = 0; + virtual OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) = 0; + virtual OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, + std::vector& ops) = 0; + + virtual OH_NN_ReturnCode IsFloat16PrecisionSupported(bool& isSupported) = 0; + virtual OH_NN_ReturnCode IsPerformanceModeSupported(bool& isSupported) = 0; + virtual OH_NN_ReturnCode IsPrioritySupported(bool& isSupported) = 0; + virtual OH_NN_ReturnCode IsDynamicInputSupported(bool& isSupported) = 0; + virtual OH_NN_ReturnCode IsModelCacheSupported(bool& isSupported) = 0; + + virtual OH_NN_ReturnCode PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) = 0; + virtual OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, + std::shared_ptr& preparedModel) = 0; + + virtual void* AllocateBuffer(size_t length) = 0; + virtual OH_NN_ReturnCode ReleaseBuffer(const void* buffer) = 0; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_DEVICE_H \ No newline at end of file diff --git a/interfaces/oem/cpp_api/device_registrar.h b/interfaces/oem/cpp_api/device_registrar.h new file mode 100644 index 0000000..9d3c832 --- /dev/null +++ b/interfaces/oem/cpp_api/device_registrar.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_DEVICE_REGISTRAR_H +#define NEURAL_NETWORK_RUNTIME_DEVICE_REGISTRAR_H + +#include +#include +#include + +#include "interfaces/oem/cpp_api/device.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +using CreateDevice = std::function()>; + +class DeviceRegistrar { +public: + DeviceRegistrar(const CreateDevice creator); + ~DeviceRegistrar() = default; +}; + +#define REGISTER_DEVICE(deviceName, vendorName, creator) \ + namespace { \ + static OHOS::NeuralNetworkRuntime::DeviceRegistrar g_##deviceName##_##vendorName##_device_registrar(creator) \ + } // namespace +} // namespace NeuralNetworkRuntime +} // OHOS +#endif // NEURAL_NETWORK_RUNTIME_DEVICE_REGISTRAR_H \ No newline at end of file diff --git a/interfaces/oem/cpp_api/prepared_model.h b/interfaces/oem/cpp_api/prepared_model.h new file mode 100644 index 0000000..6574131 --- /dev/null +++ b/interfaces/oem/cpp_api/prepared_model.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_PREPARED_MODEL_H +#define NEURAL_NETWORK_RUNTIME_PREPARED_MODEL_H + +#include + +#include "interfaces/kits/c/neural_network_runtime_type.h" +#include "cpp_type.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +class PreparedModel { +public: + PreparedModel() = default; + virtual ~PreparedModel() = default; + + virtual OH_NN_ReturnCode ExportModelCache(std::vector& modelCache) = 0; + + virtual OH_NN_ReturnCode Run(const std::vector& inputs, + const std::vector& outputs, + std::vector>& outputsDims, + std::vector& isOutputBufferEnough) = 0; +}; +} // OHOS +} // namespace NeuralNetworkRuntime +#endif // NEURAL_NETWORK_RUNTIME_PREPARED_MODEL_H \ No newline at end of file diff --git a/neural-network-runtime-guidelines.md b/neural-network-runtime-guidelines.md new file mode 100644 index 0000000..bf191bc --- /dev/null +++ b/neural-network-runtime-guidelines.md @@ -0,0 +1,460 @@ +# Neural Network Runtime开发指导 + +## 场景介绍 + +Neural Network Runtime作为AI推理引擎和加速芯片的桥梁,为AI推理引擎提供精简的Native接口,满足推理引擎通过加速芯片执行端到端推理的需求;同时为加速芯片提供了统一的HDI接口,使能加速芯片接入OpenHarmony社区生态。 + +## 环境准备 + +### 环境要求 + +Neural Network Runtime部件的环境要求如下: + +- 系统版本:OpenHarmony 3.2及以上。 +- 开发环境:Ubuntu 18.04及以上。 +- 接入设备:OpenHarmony定义的标准设备,并且系统中内置的硬件加速器驱动,已通过HDI接口对接Neural Network Runtime。 + +由于Neural Network Runtime通过OpenHarmony Native API对外开放,需要通过OpenHarmony的Native开发套件编译Neural Network Runtime应用。在社区的[每日构建](http://ci.openharmony.cn/dailys/dailybuilds)下载对应系统版本的ohos-sdk压缩包,从压缩包中提取对应平台的Native开发套件。以Linux为例,Native开发套件的压缩包命名为`native-linux-{版本号}.zip`。 + +### 环境搭建 + +1. 打开Ubuntu编译服务器的终端。 +2. 把下载好的Native开发套件压缩包拷贝至当前用户根目录下。 +3. 执行以下命令解压Native开发套件的压缩包。 +```shell +unzip native-linux-{版本号}.zip +``` + +解压缩后的内容如下(随版本迭代,目录下的内容可能发生变化,请以最新版本的Native API为准): +```text +native/ +├── build // 交叉编译工具链 +├── build-tools // 编译构建工具 +├── docs +├── llvm +├── nativeapi_syscap_config.json +├── ndk_system_capability.json +├── NOTICE.txt +├── oh-uni-package.json +└── sysroot // Native API头文件和库 +``` +## Neural Network Runtime接口 + +详细的Neural Network Runtime接口文档请参考: +- [neural_network_runtime.h](./interfaces/kits/c/neural_network_runtime.h) +- [neural_network_runtime_type.h](./interfaces/kits/c/neural_network_runtime_type.h) + +## Neural Network Runtime开发指导 + +### 开发步骤 + +Neural Network Runtime的开发流程主要包含**模型构造**、**模型编译**和**推理执行**三个阶段。以下开发步骤以`Add`单算子模型为例,介绍调用Neural Network Runtime接口,开发应用的过程。 + +1. 创建应用样例文件。 + + 首先,创建Neural Network Runtime应用样例的源文件。在项目目录下执行以下命令,创建`nnrt_example/`目录,在目录下创建 `nnrt_example.cpp` 源文件。 + + ```shell + mkdir ~/nnrt_example && cd ~/nnrt_example + touch nnrt_example.cpp + ``` + +2. 导入Neural Network Runtime。 + + 在 nnrt_example.cpp` 文件的开头添加以下代码,引入Neural Network Runtime接口。 + + ```cpp + #include + #include + #include + + #include "neural_network_runtime/neural_network_runtime.h" + + const size_t DATA_LENGTH = 4 * 12; // 输出、输出的字节长度 + ``` + +3. 
构造模型。 + + 以下图所示的`Add`单算子模型为例,使用Neural Network Runtime构图模块,构造样例模型。`Add`算子有两个输入、一个参数和一个输出,其中的参数用于指定`Add`的激活类型。 + + !["Add单算子网络示意图"](neural_network_runtime_add_op_model.png) + + ```cpp + OH_NN_ReturnCode BuildModel(OH_NNModel** pModel) + { + // 创建模型实例,调用构图模块的接口,进行模型构图 + OH_NNModel* model = OH_NNModel_Construct(); + if (model == nullptr) { + std::cout << "Create model failed." << std::endl; + return OH_NN_MEMORY_ERROR; + } + + // 添加Add算子的第一个输入Tensor,类型为float32,张量形状为[1, 2, 2, 3] + int32_t inputDims[4] = {1, 2, 2, 3}; + OH_NN_Tensor input1 = {OH_NN_FLOAT32, 4, inputDims, nullptr, OH_NN_TENSOR}; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, &input1); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed, add Tensor of first input failed." << std::endl + return ret; + } + + // 添加Add算子的第二个输入Tensor,类型为float32,张量形状为[1, 2, 2, 3] + OH_NN_Tensor input2 = {OH_NN_FLOAT32, 4, inputDims, nullptr, OH_NN_TENSOR}; + ret = OH_NNModel_AddTensor(model, &input2); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed, add Tensor of second input failed." << std::endl; + return ret; + } + + // 添加Add算子唯一一个参数Tensor,激活函数类型,其数据类型为int8,是一个标量。 + int32_t activationDims = 1; + int8_t activationValue = OH_NN_FUSED_NONE; + OH_NN_Tensor activation = {OH_NN_INT8, 1, &activationDims, nullptr, OH_NN_ADD_ACTIVATIONTYPE}; + ret = OH_NNModel_AddTensor(model, &activation); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed, add Tensor of activation failed." << std::endl; + return ret; + } + + // 将激活函数类型设置为“无激活函数”。 + ret = OH_NNModel_SetTensorData(model, 2, &activationValue, sizeof(int8_t)); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed, set value of activation failed." << std::endl; + return ret; + } + + // 设置Add算子的输出,类型为float32,张量形状为[1, 2, 2, 3] + OH_NN_Tensor output = {OH_NN_FLOAT32, 4, inputDims, nullptr, OH_NN_TENSOR}; + ret = OH_NNModel_AddTensor(model, &output); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed, add Tensor of output failed." << std::endl; + return ret; + } + + // 指定Add算子的输入、参数和输出索引 + uint32_t inputIndicesValues[2] = {0, 1}; + uint32_t paramIndicesValues = 2; + uint32_t outputIndicesValues = 3; + OH_NN_UInt32Array paramIndices = {¶mIndicesValues, 1}; + OH_NN_UInt32Array inputIndices = {inputIndicesValues, 2}; + OH_NN_UInt32Array outputIndices = {&outputIndicesValues, 1}; + + // 向模型实例添加Add算子 + ret = OH_NNModel_AddOperation(model, OH_NN_OPS_ADD, ¶mIndices, &inputIndices, &outputIndices); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed, add operation failed." << std::endl; + return ret; + } + + // 设置模型实例的输入、输出索引 + ret = OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed, specify inputs and outputs failed." << std::endl; + return ret; + } + + // 完成模型实例的构建 + ret = OH_NNModel_Finish(model); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed, error happened when finishing model construction." << std::endl; + return ret; + } + + *pModel = model; + return OH_NN_SUCCESS; + } + ``` + +4. 
查询Neural Network Runtime已经对接的加速芯片。 + + 通过HDI接口,Neural Network Runtime支持对接多种加速芯片。在执行模型编译前,需要查询当前设备下,Neural Network Runtime已经对接的加速芯片。每个加速芯片对应唯一的ID值,在编译阶段需要通过设备ID,指定模型编译的芯片。 + ```cpp + void GetAvailableDevices(std::vector& availableDevice) + { + availableDevice.clear(); + + // 获取可用的硬件ID + const size_t* devices = nullptr; + uint32_t deviceCount = 0; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&devices, &deviceCount); + if (ret != OH_NN_SUCCESS) { + std::cout << "GetAllDevicesID failed, get no available device." << std::endl; + return; + } + + for (uint32_t i = 0; i < deviceCount; i++) { + availableDevice.emplace_back(devices[i]); + } + } + ``` + +5. 在指定的设备上编译模型。 + + Neural Network Runtime使用抽象的模型表达描述AI模型的拓扑结构,在加速芯片上执行前,需要通过Neural Network Runtime提供的编译模块,将抽象的模型表达下发至芯片驱动层,转换成可以直接推理计算的格式。 + ```cpp + OH_NN_ReturnCode CreateCompilation(OH_NNModel* model, const std::vector& availableDevice, OH_NNCompilation** pCompilation) + { + // 创建编译实例,用于将模型传递至底层硬件编译 + OH_NNCompilation* compilation = OH_NNCompilation_Construct(model); + if (compilation == nullptr) { + std::cout << "CreateCompilation failed, error happended when creating compilation." << std::endl; + return OH_NN_MEMORY_ERROR; + } + + // 设置编译的硬件、缓存路径、性能模式、计算优先级、是否开启float16低精度计算等选项 + + OH_NN_ReturnCode ret = OH_NNCompilation_SetDevice(compilation, availableDevice[0]); // 选择在第一个设备上编译模型 + if (ret != OH_NN_SUCCESS) { + std::cout << "CreateCompilation failed, error happened when setting device." << std::endl; + return ret; + } + + // 将模型编译结果缓存在/data/local/tmp目录下,版本号指定为1 + ret = OH_NNCompilation_SetCache(compilation, "/data/local/tmp", 1); + if (ret != OH_NN_SUCCESS) { + std::cout << "CreateCompilation failed, error happened when setting cache path." << std::endl; + return ret; + } + + ret = OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_MEDIUM); // 选择中等的性能模式 + if (ret != OH_NN_SUCCESS) { + std::cout << "CreateCompilation failed, error happened when setting performance mode." << std::endl; + return ret; + } + + ret = OH_NNCompilation_EnableFloat16(compilation, true); // 如果设备支持Float16低精度推理,则开启Float16精度推理 + if (ret != OH_NN_SUCCESS) { + std::cout << "CreateCompilation failed, error happened when enable float16 computation." << std::endl; + return ret; + } + + // 完成编译设置,进行模型编译 + ret = OH_NNCompilation_Build(compilation); + if (ret != OH_NN_SUCCESS) { + std::cout << "CreateCompilation failed, error happened when building compilation." << std::endl; + return ret; + } + + *pCompilation = compilation; + return OH_NN_SUCCESS; + } + ``` + +6. 创建执行器。 + + 完成模型编译后,需要调用Neural Network Runtime的执行模块,创建推理执行器。执行阶段,设置模型输入、获取模型输出和触发推理计算的操作均围绕执行器完成。 + ```cpp + OH_NNExecutor* CreateExecutor(OH_NNCompilation* compilation) + { + // 创建执行实例 + OH_NNExecutor* executor = OH_NNExecutor_Construct(compilation); + return executor; + } + ``` + +7. 
执行推理计算,并打印计算结果。 + + 通过执行模块提供的接口,将推理计算所需要的输入数据传递给执行器,触发执行器完成一次推理计算,获取模型的推理计算结果。 + ```cpp + OH_NN_ReturnCode Run(OH_NNExecutor* executor) + { + // 构造示例数据 + float input1[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; + float input2[12] = {11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22}; + + int32_t inputDims[4] = {1, 2, 2, 3}; + OH_NN_Tensor inputTensor1 = {OH_NN_FLOAT32, 4, inputDims, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor inputTensor2 = {OH_NN_FLOAT32, 4, inputDims, nullptr, OH_NN_TENSOR}; + + // 设置执行的输入 + + // 设置执行的第一个输入,输入数据由input1指定 + OH_NN_ReturnCode ret = OH_NNExecutor_SetInput(executor, 0, &inputTensor1, input1, DATA_LENGTH); + if (ret != OH_NN_SUCCESS) { + std::cout << "Run failed, error happened when setting first input." << std::endl; + return ret; + } + + // 设置执行的第二个输入,输入数据由input2指定 + ret = OH_NNExecutor_SetInput(executor, 1, &inputTensor2, input2, DATA_LENGTH); + if (ret != OH_NN_SUCCESS) { + std::cout << "Run failed, error happened when setting second input." << std::endl; + return ret; + } + + // 设置输出的数据缓冲区,OH_NNExecutor_Run执行计算后,输出结果将保留在output中 + float output[12]; + ret = OH_NNExecutor_SetOutput(executor, 0, output, DATA_LENGTH); + if (ret != OH_NN_SUCCESS) { + std::cout << "Run failed, error happened when setting output buffer." << std::endl; + return ret; + } + + // 执行计算 + ret = OH_NNExecutor_Run(executor); + if (ret != OH_NN_SUCCESS) { + std::cout << "Run failed, error doing execution." << std::endl; + return ret; + } + + // 打印输出结果 + for (uint32_t i = 0; i < 12; i++) { + std::cout << "Output index: " << i << ", value is: " << output[i] << "." << std::endl; + } + + return OH_NN_SUCCESS; + } + ``` + +8. 构建端到端构图-编译-执行流程。 + + 步骤3-步骤7实现了模型的构图、编译和执行流程,并封装成4个函数,便于模块化开发。以下示例代码将4个函数串联成完整的Neural Network Runtime开发流程。 + ```cpp + int main() + { + OH_NNModel* model = nullptr; + OH_NNCompilation* compilation = nullptr; + OH_NNExecutor* executor = nullptr; + std::vector availableDevices; + + // 模型构图阶段 + OH_NN_ReturnCode ret = BuildModel(&model); + if (ret != OH_NN_SUCCESS) { + std::cout << "BuildModel failed." << std::endl; + OH_NNModel_Destroy(&model); + return -1; + } + + // 获取 + GetAvailableDevices(availableDevices); + if (availableDevices.empty()) { + std::cout << "No available device." << std::endl; + OH_NNModel_Destroy(&model); + return -1; + } + + // 模型编译阶段 + ret = CreateCompilation(model, availableDevices, &compilation); + if (ret != OH_NN_SUCCESS) { + std::cout << "CreateCompilation failed." << std::endl; + OH_NNModel_Destroy(&model); + OH_NNCompilation_Destroy(&compilation); + return -1; + } + + // 创建模型的推理执行器 + executor = CreateExecutor(compilation); + if (executor == nullptr) { + std::cout << "CreateExecutor failed, no executor is created." << std::endl; + OH_NNModel_Destroy(&model); + OH_NNCompilation_Destroy(&compilation); + return -1; + } + + // 使用上一步创建的执行器,执行单步推理计算 + ret = Run(executor); + if (ret != OH_NN_SUCCESS) { + std::cout << "Run failed." << std::endl; + OH_NNModel_Destroy(&model); + OH_NNCompilation_Destroy(&compilation); + OH_NNExecutor_Destroy(&executor); + return -1; + } + + // 释放申请的资源 + OH_NNModel_Destroy(&model); + OH_NNCompilation_Destroy(&compilation); + OH_NNExecutor_Destroy(&executor); + + return 0; + } + ``` + +## 调测验证 + +1. 准备应用样例的编译配置文件。 + + 新建一个 `CMakeLists.txt` 文件,为开发步骤中的应用样例文件 `nnrt_example.cpp` 添加编译配置。以下提供简单的 `CMakeLists.txt` 示例: + ```text + cmake_minimum_required(VERSION 3.16) + project(nnrt_example C CXX) + + add_executable(nnrt_example + ./nnrt_example.cpp + ) + + target_link_libraries(nnrt_example + neural_network_runtime.z + ) + ``` + +2. 
编译应用样例。 + + 执行以下命令,在当前目录下新建build/目录,在build/目录下编译 `nnrt_example.cpp`,得到二进制文件 `nnrt_example`。 + ```shell + mkdir build && cd build + cmake -DCMAKE_TOOLCHAIN_FILE={交叉编译工具链的路径}/build/cmake/ohos.toolchain.cmake -DOHOS_ARCH=arm64-v8a -DOHOS_PLATFORM=OHOS -DOHOS_STL=c++_static .. + make . + ``` + +3. 执行以下代码,将样例推送到设备上执行。 + ```shell + # 将编译得到的 `nnrt_example` 推送到设备上,执行样例。 + hdc_std file send ./nnrt_example /data/local/tmp/. + + # 给测试用例可执行文件加上权限。 + hdc_std shell "chmod +x /data/local/tmp/nnrt_example" + + # 执行测试用例 + hdc_std shell "/data/local/tmp/nnrt_example" + ``` + + > **说明:** 如果样例执行正常,应该得到以下输出。 + ```text + Output index: 0, value is: 11.000000. + Output index: 1, value is: 13.000000. + Output index: 2, value is: 15.000000. + Output index: 3, value is: 17.000000. + Output index: 4, value is: 19.000000. + Output index: 5, value is: 21.000000. + Output index: 6, value is: 23.000000. + Output index: 7, value is: 25.000000. + Output index: 8, value is: 27.000000. + Output index: 9, value is: 29.000000. + Output index: 10, value is: 31.000000. + Output index: 11, value is: 33.000000. + ``` + +4. 检查Cache(可选) + + 如果在调测环境下,Neural Network Runtime对接的HDI服务支持Cache功能,执行完 `nnrt_example`, 可以在 `/data/local/tmp` 目录下 + 找到生成的缓存文件。 + + > **Cache功能说明:** 模型的IR需要传递到硬件驱动层,由HDI服务将统一的IR图,编译成硬件专用的计算图,编译的过程非常耗时。Neural Network Runtime支持 + 计算图缓存的特性,可以将HDI服务编译生成的计算图,缓存到设备存储中。当下一次在同一个加速芯片上编译同一个模型时,通过指定缓存的路径, + Neural Network Runtime可以直接加载缓存文件中的计算图,减少编译消耗的时间。 + + ```shell + ls /data/local/tmp + ``` + + 以下为打印结果 + ```text + # 0.nncache cache_info.nncache + ``` + + 如果缓存不再使用,需要手动删除缓存,可以参考以下命令。 + ```shell + rm /data/local/tmp/*nncache + ``` + +### 相关实例 + +针对Neural Network Runtime应用开发,可以参考一下实例: +- [Tensorflow Lite对接Neural Network Runtime](./example/deep_learning_framework/README_zh.md) + + +## 相关仓 + +- [HDF Framework](https://gitee.com/openharmony/drivers_hdf_core) +- [Mindspore](https://gitee.com/openharmony/third_party_mindspore) diff --git a/neural_network_runtime_add_op_model.png b/neural_network_runtime_add_op_model.png new file mode 100644 index 0000000000000000000000000000000000000000..6b2eaa15def968551cf7cf97a9c48882b79c0cdf GIT binary patch literal 27897 zcmc$_bzGEfyDmHkA_@p72!ezHQqm0q5`wgJgMf6y&?yqqN;eWiH$#_#bTiU0bPQcX z$9IqK^S=9i*4k^Ywg32j`ww*PnfvPNI?v-guHy`OuONl{fb;fJ6Bxu=;U`r zKC=h_1!6ox5Ckafzn~()7te|KJ`(?9WZ2@{XN+tkzfSSkFQUsf+g~wpVu!iDP^tJ! zwP6?;fB8aiHyK^J4=pnD4%(gjSPkQ|EqO!zU(d52bw|l4s}&UL#jRg!hgCy2r-qJ7 zjB1DSPT@l=_krGZN?=_2Baz5u+6cT8XEcx>WGC#)mr1h*ELU50`+nu`4pJsHR3efpvnx@(ncd*+nPVLX_KU#k){g1(GgA zJyxW_ic9t+FW_F6)!F`Ht}M2|1;#a+pR7bVtEPcLtHkKm&lVQ{z3FNzkGe{ zIXhlG^>>$xo2P|mM;h9fgIhB4@?%zIrVKA$Y}_8&H z1JIjcQflLUvfyRvsfm&aK{D557Km(O29MTL#E`}2Z$cR4EL?a|L@!eW6vWOERBGcp zrE&3b!fY0NO>d|2O_=2(<|M(}q=7JFnC^;c_7dH%hM;rU;$fGRcC+p0Co3&h4Y4fi zy!Ce(H5)}Ky`ckO2K`J$49U8ad!QZa@Jtm(M#hf@wffj_Mm9Z}1Z2wWo#w_hA#xF) zSug_y@TPhl@*aqsI{c^EM47=v(Ufdr&;$P_`M27~+BKV7Tk;8`EEf{?0kXcJg{N&K zUMFih^`T_kaPZT3d;)^ZOzO_flG)n7y_KFDpjfKcn9F|Jwe?x(*0O<6R$Bg9uK2yy z5u4gM(}q~}jmJNUd#};3L|@B?ly;dd@nPfu$*?-5u4RiJigb#--a zpE=No0{uT50nP&=Z`LgW0k5jHv|>$S$$okfz@Iq@3*Xlw((B|AwJMA^eH-!Ep6E$VP$|UD4ZDNyh-J)Hb|JXKJe|url*p1pKBh)KFHr=P zR;Z*%;ZPw9t+6QMZ2BCaWH?V(2#vzM&cUTuw`EhI~X4IX=~G19D0=St3p=y zh%L`E8B5$5)JJ)kZSq*r$|>4NhAkk|EsiOS_wvjSP8gXD1hmlQ6EHp(sxB9Q#Wb^! 
[binary image data for neural_network_runtime_add_op_model.png omitted]
diff --git a/neural_network_runtime_intro.png b/neural_network_runtime_intro.png
new file mode 100644
index 0000000000000000000000000000000000000000..8f58413de5337ae7fb98aca126efe6f5fca2cefc
GIT binary patch
literal 26387
[binary image data for neural_network_runtime_intro.png omitted]
zqyzC;aKALVu-YxA=*_XnCuJ%sn#}Y62%t;k>GFE|!AiXOb2`2ubfZym+8sqRn6U6TQ{?hssA^l%SwWEzar#^(s4-}T1pcB zN+%ow3U*q3Cpip1a3Qox|m+dS_qFPuC8Y-yxCcdKs^YAMsCza3tV~k&3g`rqA9(T!QSavNhxFcF-9%7r+p?7 z4KyFO-u#)8Rh=MYYx*d@7Bs3z87L^$h818S&y7SOxiR>-WKxwS+nJQ)sUWITA8gsrpHcCwFk<#WbQ-_vmsaBPA90F zZWihU%&EyD+jnrd?Gh_BzL|Okx_H|_1J;hFotzYce!=Y6g?f61q=^OLq@uR{j|Z=d z%FuD>moS_V`COS^8B~Slc5y)U-%PfH1IypFI$Q5Qc$xPT+xGBX(qaJd(|C#F!3)QL zh=vmDky9Ub-vdjXPT21H#l-c*^Ph%h3$#l6&_6=X>ufqcc%D+4p*PMpIz6&gzS}V5 zH=MbI=aqvtUJaT_?C@InG#;B9>1UHi$(ViYaQdTEiNV}=ZsUVNRV>~MZOI6t%_14a z8Wx;we7I;b$C=@>pgb(T7mPR#kji%|vSi0F=T`}D9L5?-I!e`Fs=2ycAa%VHwrUa4 z(0s_})ory1h#0x6ly6KN_+ieJT-!YK#)WxUB2{tlo_q#=WxkaBuPOKw@ z3ye^WQOVGSB$mfH1J)~{c4t<1=Ng|eEWohhqwC-7gHrQ)yXtSeSCB?89r0c+svm2< z7yq4uhe$A&MF}VwrV%z~1k+&EArd=f0(A1@b7jFSCdB?N&TfTEY#ly36~60TSC06R zZ?6GUN6$iEL&X$pDqb+uD=H#=7l!vAY5z(h`bC!#( zcQ7(^slPjG<~aSCGO7woq38^U8v#*0;Oz}Z7x)=&9^0f)bSkzYV(E6pU9(BWZV#3=E*f7XxN`Cip}qSPZrf#O-tqif8?4t#JWm6e3~<>@PUHo8`LZK zs_s6fugf%L(j!~bn4o2qh{0m7=pIklXbB`EZ{km8$vE1FIl7eXjyO=d7YC<)7MCx> zw7zuS$GwkXRp*m<*}?%~nG8 zi+TFcZIBREIM-Fr=gN-<^{3%iT`2`4Wg7qcywSf*Ed9SqV3Z+1|zxo=Aw9IVO>zwz^a{E_^@M?;d+o zuRcF7FL)q&zK%>e{&nftzqIr3wJk?7xL`ev^5*(K>SYeTH-i3iaqH~V1G?Nucivu^ z;NR6d|GZZ1-)L~`Os&`PuZkq9j{bRy*!|ZL*K TransformQuantParam(const CppQuantParam& cppQuantParam) +{ + // cppQuantParam.numBits empty means no quantization is applied, return nullptr directly. + if (cppQuantParam.numBits.empty()) { + return nullptr; + } + + std::unique_ptr quantParam = std::make_unique(); + quantParam->numBits = cppQuantParam.numBits.data(); + quantParam->quantCount = cppQuantParam.numBits.size(); + quantParam->scale = cppQuantParam.scale.data(); + quantParam->zeroPoint = cppQuantParam.zeroPoint.data(); + return quantParam; +} + +OH_NN_UInt32Array TransformUInt32Array(const std::vector& vector) +{ + uint32_t* data = (vector.empty()) ? nullptr : const_cast(vector.data()); + return {data, vector.size()}; +} +} // Anonymous namespace + +// AddTensors() expects tensors do not destruct and free before the test case end. +OH_NN_ReturnCode NNRtTest::AddTensors(const std::vector& cppTensors) +{ + OH_NN_Tensor tensor; + OH_NN_ReturnCode status{OH_NN_SUCCESS}; + for (const CppTensor& cppTensor : cppTensors) { + tensor = { + .dataType = cppTensor.dataType, + .dimensionCount = static_cast(cppTensor.dimensions.size()), + .dimensions = cppTensor.dimensions.empty() ? nullptr : cppTensor.dimensions.data(), + .type = cppTensor.type + }; + + const CppQuantParam& cppQuantParam = cppTensor.quantParam; + if ((cppQuantParam.numBits.size() != cppQuantParam.scale.size()) + || (cppQuantParam.scale.size() != cppQuantParam.zeroPoint.size())) { + LOGE("NNRtTest::AddTensors failed, get different number of numBits, scales and zeroPoints."); + return OH_NN_INVALID_PARAMETER; + } + // If no quantization is applied, quantParam == nullptr and no need to check. 
+        std::unique_ptr<OH_NN_QuantParam> quantParam = TransformQuantParam(cppQuantParam);
+        tensor.quantParam = quantParam.get();
+
+        m_tensors.emplace_back(tensor);
+        m_quantParams.emplace_back(std::move(quantParam));
+
+        status = OH_NNModel_AddTensor(m_model, &tensor);
+        if (status != OH_NN_SUCCESS) {
+            LOGE("NNRtTest::AddTensors failed, error happens when adding tensor.");
+            m_tensors.clear();
+            m_quantParams.clear();
+            return status;
+        }
+
+        if (cppTensor.data != nullptr) {
+            uint32_t index = m_tensors.size() - 1;
+            status = OH_NNModel_SetTensorData(m_model, index, cppTensor.data, cppTensor.dataLength);
+            if (status != OH_NN_SUCCESS) {
+                LOGE("NNRtTest::AddTensors failed, error happens when setting value.");
+                m_tensors.clear();
+                m_quantParams.clear();
+                return status;
+            }
+        }
+    }
+
+    return status;
+}
+
+OH_NN_ReturnCode NNRtTest::AddOperation(OH_NN_OperationType opType,
+                                        const std::vector<uint32_t>& paramIndices,
+                                        const std::vector<uint32_t>& inputIndices,
+                                        const std::vector<uint32_t>& outputIndices)
+{
+    const OH_NN_UInt32Array params = TransformUInt32Array(paramIndices);
+    const OH_NN_UInt32Array inputs = TransformUInt32Array(inputIndices);
+    const OH_NN_UInt32Array outputs = TransformUInt32Array(outputIndices);
+
+    OH_NN_ReturnCode status = OH_NNModel_AddOperation(m_model, opType, &params, &inputs, &outputs);
+    if (status == OH_NN_SUCCESS) {
+        Node node = {
+            .opType = opType,
+            .inputs = inputIndices,
+            .outputs = outputIndices,
+            .params = paramIndices
+        };
+        m_nodes.emplace_back(node);
+    }
+
+    return status;
+}
+
+OH_NN_ReturnCode NNRtTest::SpecifyInputAndOutput(const std::vector<uint32_t>& inputIndices,
+                                                 const std::vector<uint32_t>& outputIndices)
+{
+    const OH_NN_UInt32Array inputs = TransformUInt32Array(inputIndices);
+    const OH_NN_UInt32Array outputs = TransformUInt32Array(outputIndices);
+
+    OH_NN_ReturnCode status = OH_NNModel_SpecifyInputsAndOutputs(m_model, &inputs, &outputs);
+    if (status == OH_NN_SUCCESS) {
+        m_inputs = inputIndices;
+        m_outputs = outputIndices;
+    }
+
+    return status;
+}
+
+OH_NN_ReturnCode NNRtTest::SetInput(uint32_t index,
+                                    const std::vector<int32_t>& dimensions,
+                                    const void* buffer,
+                                    size_t length)
+{
+    OH_NN_Tensor tensor = m_tensors[m_inputs[index]];
+    tensor.dimensions = dimensions.data();
+
+    return OH_NNExecutor_SetInput(m_executor, index, &tensor, buffer, length);
+}
+
+OH_NN_ReturnCode NNRtTest::SetOutput(uint32_t index, void* buffer, size_t length)
+{
+    return OH_NNExecutor_SetOutput(m_executor, index, buffer, length);
+}
+
+OH_NN_ReturnCode NNRtTest::SetInputFromMemory(uint32_t index,
+                                              const std::vector<int32_t>& dimensions,
+                                              const void* buffer,
+                                              size_t length,
+                                              OH_NN_Memory** pMemory)
+{
+    if (buffer == nullptr) {
+        LOGE("NNRtTest::SetInputFromMemory failed, passed nullptr to buffer.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (pMemory == nullptr) {
+        LOGE("NNRtTest::SetInputFromMemory failed, passed nullptr to pMemory.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    OH_NN_Memory* memory = OH_NNExecutor_AllocateInputMemory(m_executor, index, length);
+    if (memory == nullptr) {
+        LOGE("NNRtTest::SetInputFromMemory failed, error happened when creating input memory.");
+        return OH_NN_MEMORY_ERROR;
+    }
+
+    OH_NN_Tensor tensor = m_tensors[m_inputs[index]];
+    tensor.dimensions = dimensions.data();
+
+    OH_NN_ReturnCode status = OH_NNExecutor_SetInputWithMemory(m_executor, index, &tensor, memory);
+    if (status != OH_NN_SUCCESS) {
+        LOGE("NNRtTest::SetInputFromMemory failed, error happened when setting input.");
+        OH_NNExecutor_DestroyInputMemory(m_executor, index, &memory);
+        return status;
+    }
+
+    errno_t error_code =
memcpy_s(const_cast(memory->data), memory->length, buffer, length); + if (error_code != EOK) { + LOGE("NNRtTest::SetInputFromMemory failed, error happens when copying data to OH_NN_Memory. Error code: %d.", + error_code); + OH_NNExecutor_DestroyInputMemory(m_executor, index, &memory); + return OH_NN_MEMORY_ERROR; + } + + *pMemory = memory; + return status; +} + +OH_NN_ReturnCode NNRtTest::SetOutputFromMemory(uint32_t index, size_t length, OH_NN_Memory** pMemory) +{ + if (pMemory == nullptr) { + LOGE("NNRtTest::SetOutputFromMemory failed, passed nullptr to pMemory."); + return OH_NN_INVALID_PARAMETER; + } + + OH_NN_Memory* memory = OH_NNExecutor_AllocateOutputMemory(m_executor, index, length); + if (memory == nullptr) { + LOGE("NNRtTest::SetOutputFromMemory failed, error happened when creating output memory."); + return OH_NN_MEMORY_ERROR; + } + + OH_NN_ReturnCode status = OH_NNExecutor_SetOutputWithMemory(m_executor, index, memory); + if (status != OH_NN_SUCCESS) { + LOGE("NNRtTest::SetOutputFromMemory failed, error happened when setting output."); + OH_NNExecutor_DestroyOutputMemory(m_executor, index, &memory); + } + + *pMemory = memory; + return status; +} + +OH_NN_ReturnCode NNRtTest::GetDevices() +{ + const size_t* devicesID{nullptr}; + uint32_t count{0}; + OH_NN_ReturnCode status = OH_NNDevice_GetAllDevicesID(&devicesID, &count); + if (status != OH_NN_SUCCESS) { + LOGE("NNRtTest::GetDevices failed, get all devices ID failed."); + return status; + } + + for (uint32_t i = 0; i < count; i++) { + m_devices.emplace_back(devicesID[i]); + } + return OH_NN_SUCCESS; +} +} // namespace SystemTest +} // NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/test/system_test/common/nnrt_test.h b/test/system_test/common/nnrt_test.h new file mode 100644 index 0000000..a117096 --- /dev/null +++ b/test/system_test/common/nnrt_test.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_SYSTEM_TEST_NNRT_TEST +#define NEURAL_NETWORK_RUNTIME_SYSTEM_TEST_NNRT_TEST + +#include +#include +#include +#include + +#include "interfaces/kits/c/neural_network_runtime.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace SystemTest { +struct CppQuantParam { + std::vector numBits; + std::vector scale; + std::vector zeroPoint; +}; + +struct CppTensor { + OH_NN_DataType dataType{OH_NN_UNKNOWN}; + std::vector dimensions; + void* data{nullptr}; + size_t dataLength{0}; + CppQuantParam quantParam; + OH_NN_TensorType type{OH_NN_TENSOR}; +}; + +struct Node { + OH_NN_OperationType opType; + std::vector inputs; + std::vector outputs; + std::vector params; +}; + +class NNRtTest : public testing::Test { +public: + virtual OH_NN_ReturnCode AddTensors(const std::vector& cppTensors); + virtual OH_NN_ReturnCode AddOperation(OH_NN_OperationType opType, + const std::vector& paramIndices, + const std::vector& inputIndices, + const std::vector& outputIndices); + virtual OH_NN_ReturnCode SpecifyInputAndOutput(const std::vector& inputIndices, + const std::vector& outputIndices); + virtual OH_NN_ReturnCode SetInput(uint32_t index, + const std::vector& dimensions, + const void* buffer, + size_t length); + virtual OH_NN_ReturnCode SetOutput(uint32_t index, void* buffer, size_t length); + virtual OH_NN_ReturnCode SetInputFromMemory(uint32_t index, + const std::vector& dimensions, + const void* buffer, + size_t length, + OH_NN_Memory** pMemory); + virtual OH_NN_ReturnCode SetOutputFromMemory(uint32_t index, size_t length, OH_NN_Memory** pMemory); + virtual OH_NN_ReturnCode GetDevices(); + +protected: + OH_NNModel* m_model{nullptr}; + OH_NNCompilation* m_compilation{nullptr}; + OH_NNExecutor* m_executor{nullptr}; + + std::vector m_tensors; + std::vector> m_quantParams; + std::vector m_nodes; + std::vector m_inputs; + std::vector m_outputs; + std::vector m_devices; +}; +} // namespace SystemTest +} // NeuralNetworkRuntime +} // OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SYSTEM_TEST_NNRT_TEST \ No newline at end of file diff --git a/test/system_test/device_test.cpp b/test/system_test/device_test.cpp new file mode 100644 index 0000000..0fe24de --- /dev/null +++ b/test/system_test/device_test.cpp @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +#include "interfaces/kits/c/neural_network_runtime.h" + +using namespace testing; +using namespace testing::ext; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace SystemTest { +class DeviceTest : public testing::Test { +public: + void SetUp() {} + void TearDown() {} + +public: + std::string m_deviceName {"RK3568-CPU_Rockchip"}; + size_t m_deviceId {std::hash{}("RK3568-CPU_Rockchip")}; + OH_NN_DeviceType m_deviceType {OH_NN_CPU}; +}; + +/* + * @tc.name: device_001 + * @tc.desc: Get all devices id successfully. 
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_001, testing::ext::TestSize.Level1) +{ + const size_t* allDeviceIds = nullptr; + uint32_t count {0}; + + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&allDeviceIds, &count); + EXPECT_EQ(OH_NN_SUCCESS, ret); + + uint32_t expectCount = 1; + EXPECT_EQ(expectCount, count); + EXPECT_EQ(m_deviceId, *allDeviceIds); +} + +/* + * @tc.name: device_002 + * @tc.desc: Get all devices id with nullptr deviceId parameter. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_002, testing::ext::TestSize.Level1) +{ + uint32_t count {0}; + + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(nullptr, &count); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_003 + * @tc.desc: Get all devices id with nullptr count parameter. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_003, testing::ext::TestSize.Level1) +{ + const size_t* allDeviceIds = nullptr; + + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&allDeviceIds, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_004 + * @tc.desc: Get all devices id with not nullptr deviceId pointer. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_004, testing::ext::TestSize.Level1) +{ + const size_t allDeviceIds = 0; + const size_t* pAllDeviceIds = &allDeviceIds; + uint32_t count {0}; + + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&pAllDeviceIds, &count); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_005 + * @tc.desc: Get device name successfully. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_005, testing::ext::TestSize.Level1) +{ + const char* name = nullptr; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(m_deviceId, &name); + EXPECT_EQ(OH_NN_SUCCESS, ret); + std::string sName(name); + EXPECT_EQ(m_deviceName, sName); +} + +/* + * @tc.name: device_006 + * @tc.desc: Get device name with invalid deviceId. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_006, testing::ext::TestSize.Level1) +{ + const size_t deviceId = 0; + const char* name = nullptr; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceId, &name); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: device_007 + * @tc.desc: Get device name without nullptr name pointer. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_007, testing::ext::TestSize.Level1) +{ + const size_t deviceId = 0; + const char* name = "name"; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceId, &name); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_008 + * @tc.desc: Get device name with nullptr name parameter. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_008, testing::ext::TestSize.Level1) +{ + const size_t deviceId = 0; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceId, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_009 + * @tc.desc: Get device type successfully. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_009, testing::ext::TestSize.Level1) +{ + OH_NN_DeviceType type {OH_NN_OTHERS}; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(m_deviceId, &type); + EXPECT_EQ(OH_NN_SUCCESS, ret); + EXPECT_EQ(m_deviceType, type); +} + +/* + * @tc.name: device_010 + * @tc.desc: Get device type with invalid deviceId. 
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_010, testing::ext::TestSize.Level1) +{ + const size_t deviceId = 0; + OH_NN_DeviceType type {OH_NN_OTHERS}; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceId, &type); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_011 + * @tc.desc: Get device type with nullptr type. + * @tc.type: FUNC + */ +HWTEST_F(DeviceTest, device_011, testing::ext::TestSize.Level1) +{ + const size_t deviceId = 0; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceId, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} +} // namespace SystemTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/system_test/end_to_end_test.cpp b/test/system_test/end_to_end_test.cpp new file mode 100644 index 0000000..c668209 --- /dev/null +++ b/test/system_test/end_to_end_test.cpp @@ -0,0 +1,617 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "end_to_end_test.h" + +#include +#include +#include +#include +#include + +#include "securec.h" + +#include "common/log.h" +#include "interfaces/kits/c/neural_network_runtime.h" + +namespace fs = std::filesystem; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace SystemTest { +const float INPUT_ONE = 1.23; +const float INPUT_TWO = 2.34; +const float EXPECTED_OUTPUT = 5.91; +const int8_t EXPECTED_QUANT_OUTPUT = 10; +const float EPSILON = 1e-4; +const uint32_t NO_DEVICE_COUNT = 0; +const int32_t ELEMENT_COUNT = 12; +const uint32_t ADDEND_DATA_LENGTH = ELEMENT_COUNT * sizeof(float); +const std::string CACHE_DIR = "/data/local/tmp/nnrt_st_cache"; +const uint32_t CACHE_VERSION = 1; +const int REPEAT_TIMES = 100; + +// End2EndTest build a model with two connected add operations. 
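+// The two Add nodes are chained as output = (addend1 + addend2) + addend2, so with
+// INPUT_ONE (1.23) and INPUT_TWO (2.34) as the two addends, each float output element
+// is expected to be (1.23 + 2.34) + 2.34 = 5.91, i.e. EXPECTED_OUTPUT.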
+OH_NN_ReturnCode End2EndTest::BuildModel(const std::vector& tensors) +{ + m_model = OH_NNModel_Construct(); + if (m_model == nullptr) { + LOGE("End2EndTest::BuildModel failed, error happens when creating OH_NNModel."); + return OH_NN_MEMORY_ERROR; + } + + OH_NN_ReturnCode status = AddTensors(tensors); + if (status != OH_NN_SUCCESS) { + LOGE("End2EndTest::BuildModel failed, error happens when adding tensors."); + return status; + } + + status = AddOperation(OH_NN_OPS_ADD, {2}, {0, 1}, {3}); + if (status != OH_NN_SUCCESS) { + LOGE("End2EndTest::BuildModel failed, error happends when adding first Add operation into the model."); + return status; + } + + status = AddOperation(OH_NN_OPS_ADD, {2}, {3, 1}, {4}); + if (status != OH_NN_SUCCESS) { + LOGE("End2EndTest::BuildModel failed, error happends when adding second Add operation into the model."); + return status; + } + + status = SpecifyInputAndOutput({0, 1}, {4}); + if (status != OH_NN_SUCCESS) { + LOGE("End2EndTest::BuildModel failed, error happends when specifying the inputs and outputs."); + return status; + } + + status = OH_NNModel_Finish(m_model); + if (status != OH_NN_SUCCESS) { + LOGE("End2EndTest::BuildModel failed, error happends during constructing the model."); + return status; + } + + return status; +} + +OH_NN_ReturnCode End2EndTest::IsExpectedOutput(const float* outputBuffer) +{ + if (outputBuffer == nullptr) { + LOGE("End2EndTest::IsExpectedOutput failed, pass nullptr to outputBuffer."); + return OH_NN_INVALID_PARAMETER; + } + + for (int i = 0; i < ELEMENT_COUNT; i++) { + LOGI("Comparing inference output with expected value, output index: %d, output value: %f, " + "expected value: %f.", i, outputBuffer[i], EXPECTED_OUTPUT); + if (std::abs(outputBuffer[i] - EXPECTED_OUTPUT) > EPSILON) { + return OH_NN_FAILED; + } + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode End2EndTest::IsExpectedOutput(const OH_NN_Memory* outputMemory) +{ + if (outputMemory == nullptr) { + LOGE("End2EndTest::IsExpectedOutput failed, pass nullptr to outputMemory."); + return OH_NN_INVALID_PARAMETER; + } + + if (outputMemory->length == 0) { + LOGE("End2EndTest::IsExpectedOutput failed, outputMemory is empty."); + return OH_NN_FAILED; + } + + float* output = static_cast(const_cast(outputMemory->data)); + return IsExpectedOutput(output); +} + +/* + * @tc.name: end_to_end_test_001 + * @tc.desc: Test End-to-End operation of Neural Network Runtime. + * @tc.type: FUNC + */ +HWTEST_F(End2EndTest, end_to_end_test_001, testing::ext::TestSize.Level1) +{ + // Prepare tensors + int8_t activationValue{0}; + CppQuantParam quantParam{{}, {}, {}}; + CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE}; + CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + std::vector tensors{addend1, addend2, activation, immediateTensor, output}; + + ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors)); + + m_compilation = OH_NNCompilation_Construct(m_model); + ASSERT_NE(nullptr, m_compilation); + OH_NNModel_Destroy(&m_model); + ASSERT_EQ(nullptr, m_model); + + ASSERT_EQ(OH_NN_SUCCESS, GetDevices()); + ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator. 
+ size_t targetDevice = m_devices[0]; // Use the first device in system test. + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation)); + + m_executor = OH_NNExecutor_Construct(m_compilation); + ASSERT_NE(nullptr, m_executor); + OH_NNCompilation_Destroy(&m_compilation); + ASSERT_EQ(nullptr, m_compilation); + + // Set value of firstAddend + std::vector firstAddendValue(ELEMENT_COUNT, INPUT_ONE); + ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set value of secondAddend + std::vector secondAddendValue(ELEMENT_COUNT, INPUT_TWO); + ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set output buffer of output + float outputBuffer[ELEMENT_COUNT]; + ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH)); + + // Run inference and assert output value + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor)); + ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputBuffer)); + + OH_NNExecutor_Destroy(&m_executor); + ASSERT_EQ(nullptr, m_executor); +} + +/* + * @tc.name: end_to_end_test_002 + * @tc.desc: Test End-to-End operation of Neural Network Runtime using OH_NN_Memory + * @tc.type: FUNC + */ +HWTEST_F(End2EndTest, end_to_end_test_002, testing::ext::TestSize.Level1) +{ + // Prepare tensors + int8_t activationValue{0}; + CppQuantParam quantParam{{}, {}, {}}; + CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE}; + CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + std::vector tensors{addend1, addend2, activation, immediateTensor, output}; + + ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors)); + + m_compilation = OH_NNCompilation_Construct(m_model); + ASSERT_NE(nullptr, m_compilation); + OH_NNModel_Destroy(&m_model); + ASSERT_EQ(nullptr, m_model); + + ASSERT_EQ(OH_NN_SUCCESS, GetDevices()); + ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator. + size_t targetDevice = m_devices[0]; // Use the first device in system test. 
+ ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation)); + + m_executor = OH_NNExecutor_Construct(m_compilation); + ASSERT_NE(nullptr, m_executor); + OH_NNCompilation_Destroy(&m_compilation); + ASSERT_EQ(nullptr, m_compilation); + + // Set value of firstAddend + std::vector firstAddendValue(ELEMENT_COUNT, INPUT_ONE); + OH_NN_Memory* firstAddendMemory; + ASSERT_EQ(OH_NN_SUCCESS, + SetInputFromMemory(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH, &firstAddendMemory)); + + // Set value of secondAddend + std::vector secondAddendValue(ELEMENT_COUNT, INPUT_TWO); + OH_NN_Memory* secondAddendMemory; + ASSERT_EQ(OH_NN_SUCCESS, + SetInputFromMemory(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH, &secondAddendMemory)); + + // Set output buffer of output + OH_NN_Memory* outputMemory; + ASSERT_EQ(OH_NN_SUCCESS, SetOutputFromMemory(0, ADDEND_DATA_LENGTH, &outputMemory)); + + // Run inference and assert output value + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor)); + ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputMemory)); + + OH_NNExecutor_DestroyInputMemory(m_executor, 0, &firstAddendMemory); + ASSERT_EQ(nullptr, firstAddendMemory); + OH_NNExecutor_DestroyInputMemory(m_executor, 1, &secondAddendMemory); + ASSERT_EQ(nullptr, secondAddendMemory); + OH_NNExecutor_DestroyOutputMemory(m_executor, 0, &outputMemory); + ASSERT_EQ(nullptr, outputMemory); + + OH_NNExecutor_Destroy(&m_executor); + ASSERT_EQ(nullptr, m_executor); +} + +/* + * @tc.name: end_to_end_test_003 + * @tc.desc: Test End-to-End operation of Neural Network Runtime with dynamic inputs. + * @tc.type: FUNC + */ +HWTEST_F(End2EndTest, end_to_end_test_003, testing::ext::TestSize.Level1) +{ + // Prepare tensors + int8_t activationValue{0}; + CppQuantParam quantParam{{}, {}, {}}; + std::vector value(ELEMENT_COUNT, INPUT_ONE); + CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, (void*)value.data(), ADDEND_DATA_LENGTH, quantParam, OH_NN_TENSOR}; + CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE}; + CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + std::vector tensors{addend1, addend2, activation, immediateTensor, output}; + + m_model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, m_model); + ASSERT_EQ(OH_NN_SUCCESS, AddTensors(tensors)); + ASSERT_EQ(OH_NN_SUCCESS, AddOperation(OH_NN_OPS_ADD, {2}, {0, 1}, {3})); + ASSERT_EQ(OH_NN_SUCCESS, AddOperation(OH_NN_OPS_ADD, {2}, {3, 1}, {4})); + ASSERT_EQ(OH_NN_SUCCESS, SpecifyInputAndOutput({1}, {4})); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_Finish(m_model)); + + m_compilation = OH_NNCompilation_Construct(m_model); + ASSERT_NE(nullptr, m_compilation); + OH_NNModel_Destroy(&m_model); + ASSERT_EQ(nullptr, m_model); + + ASSERT_EQ(OH_NN_SUCCESS, GetDevices()); + ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator. + size_t targetDevice = m_devices[0]; // Use the first device in system test. 
+ ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation)); + + m_executor = OH_NNExecutor_Construct(m_compilation); + ASSERT_NE(nullptr, m_executor); + OH_NNCompilation_Destroy(&m_compilation); + ASSERT_EQ(nullptr, m_compilation); + + // Set value of secondAddend + std::vector secondAddendValue(ELEMENT_COUNT, INPUT_TWO); + ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set output buffer of output + float outputBuffer[ELEMENT_COUNT]; + ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH)); + + // Run inference and assert output value + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor)); + ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputBuffer)); + + OH_NNExecutor_Destroy(&m_executor); + ASSERT_EQ(nullptr, m_executor); +} + +/* + * @tc.name: end_to_end_test_004 + * @tc.desc: Test End-to-End operation of Neural Network Runtime. + * @tc.type: FUNC + */ +HWTEST_F(End2EndTest, end_to_end_test_004, testing::ext::TestSize.Level1) +{ + // Prepare tensors + int8_t activationValue{0}; + CppQuantParam quantParam{{}, {}, {}}; + CppTensor addend1{OH_NN_FLOAT32, {-1, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE}; + CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + std::vector tensors{addend1, addend2, activation, immediateTensor, output}; + + ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors)); + + m_compilation = OH_NNCompilation_Construct(m_model); + ASSERT_NE(nullptr, m_compilation); + OH_NNModel_Destroy(&m_model); + ASSERT_EQ(nullptr, m_model); + + ASSERT_EQ(OH_NN_SUCCESS, GetDevices()); + ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator. + size_t targetDevice = m_devices[0]; // Use the first device in system test. + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation)); + + m_executor = OH_NNExecutor_Construct(m_compilation); + ASSERT_NE(nullptr, m_executor); + OH_NNCompilation_Destroy(&m_compilation); + ASSERT_EQ(nullptr, m_compilation); + + // Set value of firstAddend + std::vector firstAddendValue(ELEMENT_COUNT, INPUT_ONE); + ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set value of secondAddend + std::vector secondAddendValue(ELEMENT_COUNT, INPUT_TWO); + ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set output buffer of output + float outputBuffer[ELEMENT_COUNT]; + ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH)); + + // Run inference and assert output value + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor)); + ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputBuffer)); + + OH_NNExecutor_Destroy(&m_executor); + ASSERT_EQ(nullptr, m_executor); +} + +/* + * @tc.name: end_to_end_test_005 + * @tc.desc: Test End-to-End execution with cache setting and loading. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(End2EndTest, end_to_end_test_005, testing::ext::TestSize.Level1)
+{
+    // Prepare tensors
+    int8_t activationValue{0};
+    CppQuantParam quantParam{{}, {}, {}};
+    CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
+    CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
+    CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE};
+    CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
+    CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
+    std::vector<CppTensor> tensors{addend1, addend2, activation, immediateTensor, output};
+    ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors));
+
+    ASSERT_EQ(OH_NN_SUCCESS, GetDevices());
+    ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator.
+    size_t targetDevice = m_devices[0]; // Use the first device in system test.
+
+    // Used to export cache.
+    OH_NNCompilation* compilationCacheExporter = OH_NNCompilation_Construct(m_model);
+    ASSERT_NE(nullptr, compilationCacheExporter);
+
+    const fs::path cachePath{CACHE_DIR};
+    ASSERT_EQ(false, fs::exists(cachePath));
+    ASSERT_EQ(true, fs::create_directory(cachePath));
+
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(compilationCacheExporter, targetDevice));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetCache(compilationCacheExporter, CACHE_DIR.c_str(), CACHE_VERSION));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(compilationCacheExporter));
+    ASSERT_EQ(false, fs::is_empty(cachePath));
+    OH_NNCompilation_Destroy(&compilationCacheExporter);
+    ASSERT_EQ(nullptr, compilationCacheExporter);
+
+    // This compilation loads cache.
+    m_compilation = OH_NNCompilation_Construct(m_model);
+    ASSERT_NE(nullptr, m_compilation);
+    OH_NNModel_Destroy(&m_model);
+    ASSERT_EQ(nullptr, m_model);
+
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetCache(m_compilation, CACHE_DIR.c_str(), CACHE_VERSION));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation));
+
+    m_executor = OH_NNExecutor_Construct(m_compilation);
+    ASSERT_NE(nullptr, m_executor);
+    OH_NNCompilation_Destroy(&m_compilation);
+    ASSERT_EQ(nullptr, m_compilation);
+
+    // Set value of firstAddend
+    std::vector<float> firstAddendValue(ELEMENT_COUNT, INPUT_ONE);
+    ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH));
+
+    // Set value of secondAddend
+    std::vector<float> secondAddendValue(ELEMENT_COUNT, INPUT_TWO);
+    ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH));
+
+    // Set output buffer of output
+    float outputBuffer[ELEMENT_COUNT];
+    ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH));
+
+    // Run inference and assert output value
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor));
+    ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputBuffer));
+
+    OH_NNExecutor_Destroy(&m_executor);
+    ASSERT_EQ(nullptr, m_executor);
+
+    // If the cache directory and the cache files are deleted successfully, remove_all() should return a value
+    // larger than 1. The actual value depends on the implementation of the NNRt service.
+    ASSERT_GT(fs::remove_all(cachePath), (std::uintmax_t)1);
+}
+
+/*
+ * @tc.name: end_to_end_test_006
+ * @tc.desc: Test End-to-End execution mixing SetInput and SetInputFromMemory functions.
+ * @tc.type: FUNC + */ +HWTEST_F(End2EndTest, end_to_end_test_006, testing::ext::TestSize.Level1) +{ + // Prepare tensors + int8_t activationValue{0}; + CppQuantParam quantParam{{}, {}, {}}; + CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE}; + CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + std::vector tensors{addend1, addend2, activation, immediateTensor, output}; + + ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors)); + + ASSERT_EQ(OH_NN_SUCCESS, GetDevices()); + ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator. + size_t targetDevice = m_devices[0]; // Use the first device in system test. + + // This compilation loads cache. + m_compilation = OH_NNCompilation_Construct(m_model); + ASSERT_NE(nullptr, m_compilation); + OH_NNModel_Destroy(&m_model); + ASSERT_EQ(nullptr, m_model); + + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation)); + + m_executor = OH_NNExecutor_Construct(m_compilation); + ASSERT_NE(nullptr, m_executor); + OH_NNCompilation_Destroy(&m_compilation); + ASSERT_EQ(nullptr, m_compilation); + + // Set value of firstAddend + std::vector firstAddendValue(ELEMENT_COUNT, INPUT_ONE); + ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set value of secondAddend + std::vector secondAddendValue(ELEMENT_COUNT, INPUT_TWO); + OH_NN_Memory* secondAddendMemory; + ASSERT_EQ(OH_NN_SUCCESS, + SetInputFromMemory(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH, &secondAddendMemory)); + + // Set output buffer of output + OH_NN_Memory* outputMemory; + ASSERT_EQ(OH_NN_SUCCESS, SetOutputFromMemory(0, ADDEND_DATA_LENGTH, &outputMemory)); + + // Run inference and assert output value + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor)); + ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputMemory)); + + OH_NNExecutor_DestroyInputMemory(m_executor, 1, &secondAddendMemory); + ASSERT_EQ(nullptr, secondAddendMemory); + OH_NNExecutor_DestroyOutputMemory(m_executor, 0, &outputMemory); + ASSERT_EQ(nullptr, outputMemory); + + OH_NNExecutor_Destroy(&m_executor); + ASSERT_EQ(nullptr, m_executor); +} + +/* + * @tc.name: end_to_end_test_007 + * @tc.desc: Test End-to-End operation of Neural Network Runtime with quantization. 
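+ *           (Worked expectation: with the input scale 0.2, the quantized inputs 4 and 8 correspond to
+ *           0.8 and 1.6; the two chained additions give 0.8 + 1.6 + 1.6 = 4.0, which maps back to
+ *           4.0 / 0.4 = 10 at the output scale 0.4, matching EXPECTED_QUANT_OUTPUT.)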
+ * @tc.type: FUNC + */ +HWTEST_F(End2EndTest, end_to_end_test_007, testing::ext::TestSize.Level1) +{ + // Prepare tensors + int8_t activationValue{0}; + CppQuantParam quantParam{{}, {}, {}}; + CppQuantParam quantParam1{{8}, {0.2}, {0}}; + CppQuantParam quantParam2{{8}, {0.4}, {0}}; + CppTensor addend1{OH_NN_INT8, {3, 2, 2}, nullptr, 0, quantParam1, OH_NN_TENSOR}; + CppTensor addend2{OH_NN_INT8, {3, 2, 2}, nullptr, 0, quantParam1, OH_NN_TENSOR}; + CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE}; + CppTensor immediateTensor{OH_NN_INT8, {3, 2, 2}, nullptr, 0, quantParam1, OH_NN_TENSOR}; + CppTensor output{OH_NN_INT8, {3, 2, 2}, nullptr, 0, quantParam2, OH_NN_TENSOR}; + std::vector tensors{addend1, addend2, activation, immediateTensor, output}; + + ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors)); + + m_compilation = OH_NNCompilation_Construct(m_model); + ASSERT_NE(nullptr, m_compilation); + OH_NNModel_Destroy(&m_model); + ASSERT_EQ(nullptr, m_model); + + ASSERT_EQ(OH_NN_SUCCESS, GetDevices()); + ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator. + size_t targetDevice = m_devices[0]; // Use the first device in system test. + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation)); + + m_executor = OH_NNExecutor_Construct(m_compilation); + ASSERT_NE(nullptr, m_executor); + OH_NNCompilation_Destroy(&m_compilation); + ASSERT_EQ(nullptr, m_compilation); + + // Set value of firstAddend + std::vector firstAddendValue(ELEMENT_COUNT, 4); + ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set value of secondAddend + std::vector secondAddendValue(ELEMENT_COUNT, 8); + ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set output buffer of output + int8_t outputBuffer[ELEMENT_COUNT]; + ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH)); + + // Run inference and assert output value + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor)); + for (int i = 0; i < ELEMENT_COUNT; i++) { + printf("Comparing output with expected value, output index: %d, output value: %d, expected value: %d.", + i, static_cast(outputBuffer[i]), static_cast(EXPECTED_QUANT_OUTPUT)); + ASSERT_EQ(outputBuffer[i], EXPECTED_QUANT_OUTPUT); + } + + OH_NNExecutor_Destroy(&m_executor); + ASSERT_EQ(nullptr, m_executor); +} + +/* + * @tc.name: end_to_end_test_008 + * @tc.desc: Test End-to-End operation of Neural Network Runtime by calling OH_NNExecutor_Run multiple times. 
+ * @tc.type: FUNC + */ +HWTEST_F(End2EndTest, end_to_end_test_008, testing::ext::TestSize.Level1) +{ + // Prepare tensors + int8_t activationValue{0}; + CppQuantParam quantParam{{}, {}, {}}; + CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE}; + CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + std::vector tensors{addend1, addend2, activation, immediateTensor, output}; + + ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors)); + + m_compilation = OH_NNCompilation_Construct(m_model); + ASSERT_NE(nullptr, m_compilation); + OH_NNModel_Destroy(&m_model); + ASSERT_EQ(nullptr, m_model); + + ASSERT_EQ(OH_NN_SUCCESS, GetDevices()); + ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator. + size_t targetDevice = m_devices[0]; // Use the first device in system test. + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation)); + + m_executor = OH_NNExecutor_Construct(m_compilation); + ASSERT_NE(nullptr, m_executor); + OH_NNCompilation_Destroy(&m_compilation); + ASSERT_EQ(nullptr, m_compilation); + + std::vector firstAddendValue(ELEMENT_COUNT, INPUT_ONE); + std::vector secondAddendValue(ELEMENT_COUNT, INPUT_TWO); + float outputBuffer[ELEMENT_COUNT]; + + // Test inference multiple times. + for (int i = 0; i < REPEAT_TIMES; i++) { + + // Set value of firstAddend + ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set value of secondAddend + ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set output buffer of output + ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH)); + + // Run inference and assert output value + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor)); + ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputBuffer)); + } + + OH_NNExecutor_Destroy(&m_executor); + ASSERT_EQ(nullptr, m_executor); +} +} // namespace SystemTest +} // NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/test/system_test/end_to_end_test.h b/test/system_test/end_to_end_test.h new file mode 100644 index 0000000..7255bce --- /dev/null +++ b/test/system_test/end_to_end_test.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef SYSTEM_TEST_END_TO_END_TEST +#define SYSTEM_TEST_END_TO_END_TEST + +#include +#include +#include + +#include "interfaces/kits/c/neural_network_runtime.h" +#include "test/system_test/common/nnrt_test.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace SystemTest { +class End2EndTest : public NNRtTest { +public: + End2EndTest() = default; + + OH_NN_ReturnCode BuildModel(const std::vector& tensors); + OH_NN_ReturnCode IsExpectedOutput(const float* outputBuffer); + OH_NN_ReturnCode IsExpectedOutput(const OH_NN_Memory* outputMemory); +}; +} // namespace SystemTest +} // NeuralNetworkRuntime +} // OHOS + +#endif // SYSTEM_TEST_END_TO_END_TEST \ No newline at end of file diff --git a/test/system_test/stress_test.cpp b/test/system_test/stress_test.cpp new file mode 100644 index 0000000..21344fa --- /dev/null +++ b/test/system_test/stress_test.cpp @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +#include "securec.h" + +#include "test/system_test/common/nnrt_test.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace SystemTest { +constexpr int TMP_LENGTH = 32; +constexpr int PATH_LENGTH = 255; +constexpr int STRESS_COUNT = 10000000; +const float EPSILON = 1e-4; +const uint32_t NO_DEVICE_COUNT = 0; +const uint32_t ADDEND_DATA_LENGTH = 12 * sizeof(float); +const std::string VMRSS = "VmSize:"; + +class StressTest : public NNRtTest { +public: + StressTest() = default; +}; + +std::string GetVMRSS(pid_t pid) +{ + std::string fileName{"/proc/"}; + fileName += std::to_string(pid) + "/status"; + std::ifstream ifs(fileName, std::ios::binary); + if (!ifs.is_open()) { + std::cout << "Failed to open " << fileName << std::endl; + return ""; + } + + std::string vmRss; + // Extract physical memory use from process status. + while (!ifs.eof()) { + getline(ifs, vmRss); + // Compare the first seven characters, which is "VmSize:". + if (vmRss.compare(0, 7, VMRSS) == 0) { + break; + } + } + ifs.close(); + + time_t t = time(nullptr); + char tmp[TMP_LENGTH] {' '}; + strftime(&(tmp[1]), TMP_LENGTH * sizeof(char), "%Y-%m-%d %H:%M:%S", localtime(&t)); + + return vmRss + tmp; +} + +void PrintVMRSS(pid_t pid) +{ + char path[PATH_LENGTH]; + if (!getcwd(path, PATH_LENGTH)) { + std::cout << "Failed to get current path" << std::endl; + return; + } + std::string pathStr = path; + std::string pathFull = pathStr + "/RealtimeVMRSS_" + std::to_string(pid) + ".txt"; + + std::ofstream out(pathFull, std::ios::app); + if (!out.is_open()) { + std::cout << "Some error occurs" << std::endl; + return; + } + + while (true) { + std::string rss = GetVMRSS(pid); + if (rss.empty()) { + std::cout << "Some error occurs" << std::endl; + out.close(); + return; + } + + out << rss << std::endl; + sleep(1); + } +} + +/* + * @tc.name: stress_test_001 + * @tc.desc: Check memory leak by repeatly implement end-to-end execution. 
+ * @tc.type: FUNC + */ +HWTEST_F(StressTest, stress_test_001, testing::ext::TestSize.Level1) +{ + std::cout << "Start RunDoubleConvStressTest test cast." << std::endl; + + pid_t pidOfStressTest = getpid(); + std::thread thread(PrintVMRSS, pidOfStressTest); + + size_t targetDevice{0}; + + int8_t activationValue{0}; + CppQuantParam quantParam{{}, {}, {}}; + CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE}; + CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR}; + std::vector tensors{addend1, addend2, activation, output}; + + std::vector firstAddendValue(12, 1.23); + std::vector secondAddendValue(12, 2.34); + float outputBuffer[12]; + std::vector expectedOutput(12, 3.57); + + for (int i = 0; i < STRESS_COUNT; i++) { + tensors = {addend1, addend2, activation, output}; + + m_model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, m_model); + ASSERT_EQ(OH_NN_SUCCESS, AddTensors(tensors)); + ASSERT_EQ(OH_NN_SUCCESS, AddOperation(OH_NN_OPS_ADD, {2}, {0, 1}, {3})); + ASSERT_EQ(OH_NN_SUCCESS, SpecifyInputAndOutput({0, 1}, {3})); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_Finish(m_model)); + + m_compilation = OH_NNCompilation_Construct(m_model); + ASSERT_NE(nullptr, m_compilation); + OH_NNModel_Destroy(&m_model); + ASSERT_EQ(nullptr, m_model); + + ASSERT_EQ(OH_NN_SUCCESS, GetDevices()); + ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator. + targetDevice = m_devices[0]; // Use the first device in system test. + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation)); + + m_executor = OH_NNExecutor_Construct(m_compilation); + ASSERT_NE(nullptr, m_executor); + OH_NNCompilation_Destroy(&m_compilation); + ASSERT_EQ(nullptr, m_compilation); + + // Set value of firstAddend + ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set value of secondAddend + ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH)); + + // Set output buffer of output + ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH)); + + // Run inference and assert output value + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor)); + for (int j = 0; j < 12; j++) { + ASSERT_LE(std::abs(outputBuffer[j]-expectedOutput[j]), EPSILON); + } + + OH_NNExecutor_Destroy(&m_executor); + ASSERT_EQ(nullptr, m_executor); + + m_tensors.clear(); + m_quantParams.clear(); + m_nodes.clear(); + m_inputs.clear(); + m_outputs.clear(); + m_devices.clear(); + + if (i % 1000 == 0) { + std::cout << "Execute " << i << "times." << std::endl; + } + } + thread.join(); +} +} // namespace SystemTest +} // NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/test/unittest/BUILD.gn b/test/unittest/BUILD.gn new file mode 100644 index 0000000..cca7052 --- /dev/null +++ b/test/unittest/BUILD.gn @@ -0,0 +1,23 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build/ohos.gni") + +group("unittest") { + testonly = true + deps = [ + "inner_kits:inner_kits_unittest", + "components:components_unittest", + "ops:ops_unittest", + ] +} \ No newline at end of file diff --git a/test/unittest/common/base_test.cpp b/test/unittest/common/base_test.cpp new file mode 100644 index 0000000..26529b3 --- /dev/null +++ b/test/unittest/common/base_test.cpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "base_test.h" + +using namespace OHOS::NeuralNetworkRuntime::Ops; +using namespace std; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +void BaseTest::SetUp() {} + +void BaseTest::TearDown() {} + +std::shared_ptr BaseTest::TransToNNTensor( + OH_NN_DataType dataType, const std::vector& dim, const OH_NN_QuantParam* quantParam, + OH_NN_TensorType type) +{ + std::shared_ptr nnTensor = std::make_shared(); + OH_NN_Tensor tensor; + tensor.dataType = dataType; + tensor.dimensionCount = dim.size(); + tensor.dimensions = (dim.empty() ? nullptr : dim.data()); + tensor.quantParam = quantParam; + tensor.type = type; + nnTensor->BuildFromOHNNTensor(tensor); + return nnTensor; +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/common/base_test.h b/test/unittest/common/base_test.h new file mode 100644 index 0000000..b77a3b6 --- /dev/null +++ b/test/unittest/common/base_test.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_BASE_TEST_H +#define NEURAL_NETWORK_RUNTIME_BASE_TEST_H + +#include +#include +#include "frameworks/native/ops_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class BaseTest : public testing::Test { +public: + virtual void SetUp(); + virtual void TearDown(); + virtual std::shared_ptr TransToNNTensor( + OH_NN_DataType dataType, const std::vector &dim, const OH_NN_QuantParam* quantParam, + OH_NN_TensorType type); +}; +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif \ No newline at end of file diff --git a/test/unittest/common/compilation_mock_idevice.cpp b/test/unittest/common/compilation_mock_idevice.cpp new file mode 100644 index 0000000..52f647d --- /dev/null +++ b/test/unittest/common/compilation_mock_idevice.cpp @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common/utils.h" +#include "frameworks/native/device_manager.h" +#include "frameworks/native/hdi_device.h" +#include "frameworks/native/nn_tensor.h" +#include "test/unittest/common/mock_idevice.h" + +OH_NN_ReturnCode OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + +namespace OHOS { +namespace NeuralNetworkRuntime { +std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const +{ + sptr idevice + = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice()); + if (idevice == nullptr) { + LOGE("DeviceManager mock GetDevice failed, error happened when new sptr"); + return nullptr; + } + + std::shared_ptr device = CreateSharedPtr(idevice); + if (device == nullptr) { + LOGE("DeviceManager mock GetDevice failed, the device is nullptr"); + return nullptr; + } + + if (deviceId == 0) { + LOGE("DeviceManager mock GetDevice failed, the passed parameter deviceId is 0"); + return nullptr; + } else { + return device; + } +} + +OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) +{ + // isSupported is false when expecting to return success + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + // In order not to affect other use cases, set to the OH_NN_OPERATION_FORBIDDEN + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_FAILED; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr model, + std::vector& ops) +{ + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_FILE) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + ops.emplace_back(true); + return OH_NN_SUCCESS; + } + + if (model == nullptr) { + LOGE("HDIDevice mock GetSupportedOperation 
failed, Model is nullptr, cannot query supported operation."); + return OH_NN_NULL_PTR; + } + + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + ops.emplace_back(false); + return OH_NN_SUCCESS; + } + + ops.emplace_back(true); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsDynamicInputSupported(bool& isSupported) +{ + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_FAILED; + } + + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_PATH) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsPerformanceModeSupported(bool& isSupported) +{ + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_FAILED; + } + + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsPrioritySupported(bool& isSupported) +{ + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_PARAMETER) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_INVALID_PARAMETER; + } + + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::IsFloat16PrecisionSupported(bool& isSupported) +{ + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_MEMORY_ERROR) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_MEMORY_ERROR; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + if (model == nullptr) { + LOGE("HDIDevice mock PrepareModel failed, the model is nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + if (config.enableFloat16 == false) { + LOGE("HDIDevice mock PrepareModel failed, the enableFloat16 is false"); + return OH_NN_FAILED; + } + + sptr hdiPreparedModel = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIPreparedModel()); + if (hdiPreparedModel == nullptr) { + LOGE("HDIDevice mock PrepareModel failed, error happened when new sptr"); + return OH_NN_NULL_PTR; + } + + preparedModel = CreateSharedPtr(hdiPreparedModel); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIPreparedModel::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + LOGE("HDIPreparedModel mock ExportModelCache failed, the modelCache is not 
empty"); + return OH_NN_INVALID_PARAMETER; + } + + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return OH_NN_FAILED; + } + + int bufferSize = 13; + ModelBuffer modelBuffer; + std::string aBuffer = "mock_buffer_a"; + modelBuffer.buffer = (void*)aBuffer.c_str(); + modelBuffer.length = bufferSize; + modelCache.emplace_back(modelBuffer); + + ModelBuffer modelBuffer2; + std::string bBuffer = "mock_buffer_b"; + modelBuffer2.buffer = (void*)bBuffer.c_str(); + modelBuffer2.length = bufferSize; + modelCache.emplace_back(modelBuffer2); + + return OH_NN_SUCCESS; +} + +void* HDIDevice::AllocateBuffer(size_t length) +{ + if (length == 0) { + LOGE("HDIDevice mock AllocateBuffer failed, the length param is invalid"); + return nullptr; + } + + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_NULL_PTR) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return nullptr; + } + + void* buffer = (void*)malloc(length); + if (buffer == nullptr) { + LOGE("HDIDevice mock AllocateBuffer failed, the buffer is nullptr"); + return nullptr; + } + return buffer; +} + +OH_NN_ReturnCode HDIDevice::ReleaseBuffer(const void* buffer) +{ + if (buffer == nullptr) { + LOGE("HDIDevice mock ReleaseBuffer failed, the buffer is nullptr"); + return OH_NN_NULL_PTR; + } + + free(const_cast(buffer)); + buffer = nullptr; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return OH_NN_FAILED; + } + + if (modelCache.size() == 0 || config.enableFloat16 == false) { + LOGE("HDIDevice mock PrepareModel failed, the modelCache size equals 0 or enableFloat16 is false"); + return OH_NN_FAILED; + } + + sptr hdiPreparedModel = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIPreparedModel()); + if (hdiPreparedModel == nullptr) { + LOGE("HDIDevice mock PrepareModelFromModelCache failed, error happened when new sptr"); + return OH_NN_NULL_PTR; + } + + preparedModel = CreateSharedPtr(hdiPreparedModel); + + return OH_NN_SUCCESS; +} + +bool NNTensor::IsDynamicShape() const +{ + if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return false; + } + + return true; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/common/executor_mock_device.cpp b/test/unittest/common/executor_mock_device.cpp new file mode 100644 index 0000000..47934e0 --- /dev/null +++ b/test/unittest/common/executor_mock_device.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "frameworks/native/compilation.h"
+#include "frameworks/native/execution_plan.h"
+#include "frameworks/native/hdi_device.h"
+#include "test/unittest/common/mock_idevice.h"
+
+OH_NN_ReturnCode OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+std::shared_ptr<Device> ExecutionPlan::GetInputDevice() const
+{
+    sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice> idevice
+        = sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice>(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice());
+    std::shared_ptr<Device> device = std::make_shared<HDIDevice>(idevice);
+    return device;
+}
+
+std::shared_ptr<Device> ExecutionPlan::GetOutputDevice() const
+{
+    sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice> idevice
+        = sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice>(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice());
+    std::shared_ptr<Device> device = std::make_shared<HDIDevice>(idevice);
+    return device;
+}
+
+void* HDIDevice::AllocateBuffer(size_t length)
+{
+    if (length == 0) {
+        LOGE("The length param is invalid, length=0");
+        return nullptr;
+    }
+
+    void* buffer = (void*)malloc(length);
+    if (buffer == nullptr) {
+        LOGE("Allocate buffer failed, malloc returns nullptr");
+        return nullptr;
+    }
+
+    if (OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_PARAMETER) {
+        OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN;
+        return nullptr;
+    }
+    return buffer;
+}
+
+OH_NN_ReturnCode HDIDevice::ReleaseBuffer(const void* buffer)
+{
+    if (buffer == nullptr) {
+        LOGE("Release buffer failed, the buffer is nullptr");
+        return OH_NN_FAILED;
+    }
+    free(const_cast<void*>(buffer));
+    buffer = nullptr;
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode HDIPreparedModel::Run(const std::vector<IOTensor>& inputs, const std::vector<IOTensor>& outputs,
+    std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough)
+{
+    if (inputs.empty() || outputs.empty()) {
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) {
+        OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN;
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    isOutputBufferEnough.emplace_back(true);
+    outputsDims.emplace_back(outputs[0].dimensions);
+
+    return OH_NN_SUCCESS;
+}
+
+std::shared_ptr<ExecutionPlan> Compilation::GetExecutionPlan() const
+{
+    sptr<OHOS::HDI::Nnrt::V1_0::IPreparedModel> hdiPreparedModel =
+        sptr<OHOS::HDI::Nnrt::V1_0::IPreparedModel>(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIPreparedModel());
+
+    std::shared_ptr<PreparedModel> preparedModel = std::make_shared<HDIPreparedModel>(hdiPreparedModel);
+    sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice> idevice
+        = sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice>(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice());
+    std::shared_ptr<Device> device = std::make_shared<HDIDevice>(idevice);
+    ExecutionPlan executor(preparedModel, device);
+    std::shared_ptr<ExecutionPlan> pExecutor = std::make_shared<ExecutionPlan>(executor);
+    return pExecutor;
+}
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/test/unittest/common/file_utils.cpp b/test/unittest/common/file_utils.cpp
new file mode 100644
index 0000000..c6cd79f
--- /dev/null
+++ b/test/unittest/common/file_utils.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "file_utils.h" + +#include +#include + +#include "common/log.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +FileUtils::FileUtils(const std::string &filename) :m_filename(filename) +{ +} + +FileUtils::~FileUtils() +{ + if (!m_filename.empty()) { + int ret = unlink(m_filename.c_str()); + if (ret != 0) { + LOGE("Failed to delete file: %s.", m_filename.c_str()); + } + } +} + +bool FileUtils::WriteFile(const std::string &data) +{ + std::ofstream outFile(m_filename); + if (!outFile.is_open()) { + LOGE("Failed to open file: %s.", m_filename.c_str()); + return false; + } + outFile.write(data.c_str(), data.length()); + outFile.close(); + return true; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/common/file_utils.h b/test/unittest/common/file_utils.h new file mode 100644 index 0000000..b1a8526 --- /dev/null +++ b/test/unittest/common/file_utils.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_UNITTEST_FILE_UTILS_H +#define NEURAL_NETWORK_RUNTIME_UNITTEST_FILE_UTILS_H + +#include + +namespace OHOS { +namespace NeuralNetworkRuntime { +class FileUtils { +public: + explicit FileUtils(const std::string &filename); + ~FileUtils(); + bool WriteFile(const std::string &data); + +private: + std::string m_filename; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif diff --git a/test/unittest/common/inner_model_mock_device.cpp b/test/unittest/common/inner_model_mock_device.cpp new file mode 100644 index 0000000..386ee5b --- /dev/null +++ b/test/unittest/common/inner_model_mock_device.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "common/utils.h" +#include "frameworks/native/inner_model.h" +#include "frameworks/native/hdi_device.h" +#include "frameworks/native/device_manager.h" +#include "frameworks/native/ops/div_builder.h" +#include "mock_idevice.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +// Mock the palce where the devicemanager GetDevice is called in inner_model build function. 
+std::shared_ptr<Device> DeviceManager::GetDevice(size_t deviceId) const
+{
+    sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice> idevice =
+        sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice>(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice());
+
+    if (idevice == nullptr) {
+        LOGE("DeviceManager mock GetDevice failed, error happened when new sptr");
+        return nullptr;
+    } else {
+        std::shared_ptr<Device> device = CreateSharedPtr<HDIDevice>(idevice);
+        if (device == nullptr) {
+            LOGE("DeviceManager mock GetDevice failed, device is nullptr");
+            return nullptr;
+        }
+
+        if (deviceId == 0) {
+            return nullptr;
+        } else {
+            return device;
+        }
+    }
+}
+
+// Mock the place where the operator GetPrimitive is called in the inner_model Build function.
+Ops::LiteGraphPrimitvePtr Ops::DivBuilder::GetPrimitive()
+{
+    Ops::LiteGraphPrimitvePtr primitive = {nullptr, DestroyLiteGraphTensor};
+    return primitive;
+}
+
+// Mock the place where the device GetSupportedOperation is called in the inner_model Build function.
+OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr<const mindspore::lite::LiteGraph> model,
+    std::vector<bool>& supportedOperations)
+{
+    supportedOperations = {true, true, true};
+
+    if (model->name_ == "Loaded_NNR_Model") {
+        return OH_NN_UNAVALIDABLE_DEVICE;
+    } else {
+        return OH_NN_SUCCESS;
+    }
+}
+} // NeuralNetworkRuntime
+} // OHOS
diff --git a/test/unittest/common/mock_idevice.cpp b/test/unittest/common/mock_idevice.cpp
new file mode 100644
index 0000000..a6dab85
--- /dev/null
+++ b/test/unittest/common/mock_idevice.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "mock_idevice.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +sptr INnrtDevice::Get(bool isStub) +{ + return INnrtDevice::Get("device_service", isStub); +} + +sptr INnrtDevice::Get(const std::string& serviceName, bool isStub) +{ + if (isStub) { + return nullptr; + } + + sptr mockIDevice = sptr(new (std::nothrow) MockIDevice()); + if (mockIDevice == nullptr) { + return nullptr; + } + std::string deviceName = "MockDevice"; + EXPECT_CALL(*((V1_0::MockIDevice*)mockIDevice.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_SUCCESS))); + + std::string vendorName = "MockVendor"; + EXPECT_CALL(*((V1_0::MockIDevice*)mockIDevice.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_SUCCESS))); + + V1_0::DeviceStatus deviceStatus = V1_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*((V1_0::MockIDevice*)mockIDevice.GetRefPtr()), GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceStatus), ::testing::Return(HDF_SUCCESS))); + + return mockIDevice; +} +} // V1_0 +} // Nnrt +} // HDI +} // OHOS \ No newline at end of file diff --git a/test/unittest/common/mock_idevice.h b/test/unittest/common/mock_idevice.h new file mode 100644 index 0000000..64e8231 --- /dev/null +++ b/test/unittest/common/mock_idevice.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_MOCK_IDEVICE_H +#define NEURAL_NETWORK_RUNTIME_MOCK_IDEVICE_H + +#include + +#include "frameworks/native/hdi_prepared_model.h" +#include "frameworks/native/memory_manager.h" +#include "frameworks/native/transform.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { +class MockIDevice : public INnrtDevice { +public: + MOCK_METHOD1(GetDeviceName, int32_t(std::string&)); + MOCK_METHOD1(GetVendorName, int32_t(std::string&)); + MOCK_METHOD1(GetDeviceType, int32_t(DeviceType&)); + MOCK_METHOD1(GetDeviceStatus, int32_t(DeviceStatus&)); + MOCK_METHOD2(GetSupportedOperation, int32_t(const Model&, std::vector&)); + MOCK_METHOD1(IsFloat16PrecisionSupported, int32_t(bool&)); + MOCK_METHOD1(IsPerformanceModeSupported, int32_t(bool&)); + MOCK_METHOD1(IsPrioritySupported, int32_t(bool&)); + MOCK_METHOD1(IsDynamicInputSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModel, int32_t(const Model&, const ModelConfig&, OHOS::sptr&)); + MOCK_METHOD1(IsModelCacheSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModelFromModelCache, int32_t(const std::vector&, const ModelConfig&, + OHOS::sptr&)); + MOCK_METHOD2(AllocateBuffer, int32_t(uint32_t, SharedBuffer&)); + MOCK_METHOD1(ReleaseBuffer, int32_t(const SharedBuffer&)); + MOCK_METHOD2(GetVersion, int32_t(uint32_t&, uint32_t&)); +}; + +class MockIPreparedModel : public IPreparedModel { +public: + MOCK_METHOD1(ExportModelCache, int32_t(std::vector&)); + MOCK_METHOD4(Run, int32_t(const std::vector&, const std::vector&, + std::vector>&, std::vector&)); + MOCK_METHOD2(GetVersion, int32_t(uint32_t&, uint32_t&)); + + static OH_NN_ReturnCode m_ExpectRetCode; +}; +} // V1_0 +} // Nnrt +} // HDI +} // OHOS +#endif // NEURAL_NETWORK_RUNTIME_MOCK_IDEVICE_H diff --git a/test/unittest/components/BUILD.gn b/test/unittest/components/BUILD.gn new file mode 100644 index 0000000..877f084 --- /dev/null +++ b/test/unittest/components/BUILD.gn @@ -0,0 +1,336 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import("//build/test.gni") + +module_output_path = "neural_network_runtime/" + +config("module_private_config") { + visibility = [ ":*" ] + + include_dirs = [ + "//third_party/googletest/googlemock/include", + "//foundation/ai/neural_network_runtime", + "//foundation/communication/ipc/interfaces/innerkits/ipc_core/include", + "//third_party/mindspore/mindspore/lite/mindir/include" + ] +} + +ohos_unittest("CompilationTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/compilation/compilation_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/compilation_mock_idevice.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("ExecutorTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/executor/executor_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/executor_mock_device.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("DeviceManagerTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/device_manager/device_manager_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/mock_idevice.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("DeviceRegistrarTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/device_registrar/device_registrar_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/mock_idevice.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("HDIDeviceTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/hdi_device/hdi_device_test.cpp" ] + sources += [ 
"//foundation/ai/neural_network_runtime/test/unittest/common/mock_idevice.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/file_utils.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("HDIPreparedModelTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/hdi_prepared_model/hdi_prepared_model_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/mock_idevice.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/file_utils.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("MemoryManagerTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/memory_manager/memory_manager_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/file_utils.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("TransformTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/transform/transform_test.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("InnerModelTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/inner_model/inner_model_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/inner_model_mock_device.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("NnTensorTest") { + module_out_path = module_output_path + + sources = [ 
"//foundation/ai/neural_network_runtime/test/unittest/components/inner_model/nn_tensor_test.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("NnValidationTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/inner_model/nn_validation_test.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("OpsRegistryTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/inner_model/ops_regitstry_test.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +ohos_unittest("NeuralNetworkRuntimeTest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/executor_mock_device.cpp" ] + + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + "//third_party/googletest:gmock_main", + ] + + external_deps = [ + "c_utils:utils", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "mindspore:mindir" + ] +} + +group("components_unittest") { + testonly = true + deps = [ + ":CompilationTest", + ":ExecutorTest", + ":DeviceManagerTest", + ":DeviceRegistrarTest", + ":HDIDeviceTest", + ":HDIPreparedModelTest", + ":MemoryManagerTest", + ":TransformTest", + ":InnerModelTest", + ":NnTensorTest", + ":NnValidationTest", + ":OpsRegistryTest", + ":NeuralNetworkRuntimeTest", + ] +} diff --git a/test/unittest/components/compilation/compilation_test.cpp b/test/unittest/components/compilation/compilation_test.cpp new file mode 100644 index 0000000..8529ccb --- /dev/null +++ b/test/unittest/components/compilation/compilation_test.cpp @@ -0,0 +1,1143 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "compilation_test.h" + +#include + +#include "mindir.h" + +#include "test/unittest/common/mock_idevice.h" + +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::HDI::Nnrt::V1_0; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +static const int DATA_VALUE = 1; +static const int DATA_NUM = 36; +static const int DIM_NUM = 3; +OH_NN_ReturnCode CompilationTest::BuildModelGraph(NeuralNetworkRuntime::InnerModel& innerModel) +{ + // liteGraph is released internally by innerModel + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph; + EXPECT_NE(nullptr, liteGraph); + + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_ = {0}; + liteGraph->output_indices_ = {1}; + liteGraph->all_tensors_ = {nullptr}; + const std::vector quant_params {}; + const std::vector data(DATA_NUM, DATA_VALUE); + const std::vector dim = {DIM_NUM, DIM_NUM}; + + for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) { + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) { + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + OH_NN_ReturnCode ret = innerModel.BuildFromLiteGraph(liteGraph); + return ret; +} + +void CompilationTest::SetConfig(Compilation& compilationTest) +{ + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetPerformance(OH_NN_PERFORMANCE_EXTREME)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetPriority(OH_NN_PRIORITY_HIGH)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); +} + +void CompilationTest::WriteFile(uint64_t version, uint64_t fileNumber, std::size_t cacheDeviceId) +{ + uint64_t cacheSize = 4; + uint64_t writeSize = 7; + uint64_t cacheInfo[7] = {}; + auto cacheInfoPtr = cacheInfo; + *cacheInfoPtr++ = fileNumber; + *cacheInfoPtr++ = version; + *cacheInfoPtr++ = cacheDeviceId; + for (uint64_t i = 0; i < cacheSize; ++i) { + *cacheInfoPtr++ = i; + } + std::ofstream inFile("cache_info.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + inFile.write(reinterpret_cast(cacheInfo), writeSize * sizeof(uint64_t)); + inFile.close(); +} + +void CompilationTest::BuildCompilation(InnerModel& innerModel) +{ + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); +} + +/* + * @tc.name: compilation_set_device_001 + * @tc.desc: Verify the set deviceId after compilation finish of the SetDevice function. 
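+ *           Build() has already completed on this compilation, so a second
+ *           SetDevice call is expected to be rejected with OH_NN_OPERATION_FORBIDDEN.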
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_device_002 + * @tc.desc: Verify the deviceId does not exist of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + size_t deviceId = 0; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_device_003 + * @tc.desc: Verify the error happened when getting supported operation of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_NULL_PTR, ret); +} + +/* + * @tc.name: compilation_set_device_004 + * @tc.desc: Verify the current device not support the model of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_device_005 + * @tc.desc: Verify the error happened when checking whether device supports dynamic input of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_device_006 + * @tc.desc: Verify the device does not support dynamic shape inputs of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PATH; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_device_007 + * @tc.desc: Verify the set normal deviceId of the SetDevice function. 
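+ *           deviceId 1 is assumed to map to the mock device provided by
+ *           compilation_mock_idevice.cpp, so selecting it should succeed.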
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_007, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_cachedir_001 + * @tc.desc: Verify the set cache after compilation finish of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_cachedir_002 + * @tc.desc: Verify the not set device of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_cachedir_003 + * @tc.desc: Verify the Fail to query whether the device is available to save cache model of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_cachedir_004 + * @tc.desc: Verify the device is unavailable to save cache model of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_cachedir_005 + * @tc.desc: Verify the cache model path is invalid of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../compilation_test.cpp", 1); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_cachedir_006 + * @tc.desc: Verify the cache model path is not a directory of the SetCacheDir function. 
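+ *           "./CompilationTest" is expected not to name a directory in the test
+ *           working directory, so the path check should fail with OH_NN_INVALID_PARAMETER.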
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("./CompilationTest", 1); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_cachedir_007 + * @tc.desc: Verify the success of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_007, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_performance_001 + * @tc.desc: Verify the set performance after compilation finish of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_performance_002 + * @tc.desc: Verify the set performance before set device of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_performance_003 + * @tc.desc: Verify the call device failed of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_performance_004 + * @tc.desc: Verify the device is not support performance setting of the SetPerformance function. 
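+ *           MockIPreparedModel::m_ExpectRetCode is a static hook read by the
+ *           compilation mock; setting it to OH_NN_SUCCESS here presumably makes
+ *           IsPerformanceModeSupported report false, so the setter is forbidden.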
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_performance_005 + * @tc.desc: Verify the passed invalid performance of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_PerformanceMode performance = static_cast(5); + OH_NN_ReturnCode ret = compilationTest.SetPerformance(performance); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_performance_006 + * @tc.desc: Verify the success of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_priority_001 + * @tc.desc: Verify the set priority after compilation finish of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_priority_002 + * @tc.desc: Verify the set priority before set device of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_priority_003 + * @tc.desc: Verify the call device failed of the SetPriority function. 
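+ *           The m_ExpectRetCode hook makes the mocked priority-support query fail
+ *           with OH_NN_INVALID_PARAMETER, and that code is expected to be
+ *           propagated back to the caller unchanged.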
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_priority_004 + * @tc.desc: Verify the device is not support priority setting of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_priority_005 + * @tc.desc: Verify the passed invalid priority of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_Priority priority = static_cast(5);; + OH_NN_ReturnCode ret = compilationTest.SetPriority(priority); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_priority_006 + * @tc.desc: Verify the success of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_enable_fp16_001 + * @tc.desc: Verify the enable float16 after compilation finish of the SetEnableFp16 function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_enable_fp16_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_enable_fp16_002 + * @tc.desc: Verify the set enable fp16 before set device of the SetEnableFp16 function. 
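+ *           Like SetPerformance and SetPriority, the fp16 option presumably
+ *           requires a device to be selected first, so OH_NN_OPERATION_FORBIDDEN
+ *           is expected here.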
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_enable_fp16_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + + OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_enable_fp16_003 + * @tc.desc: Verify the call device failed of the SetEnableFp16 function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_enable_fp16_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_MEMORY_ERROR; + OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: compilation_set_enable_fp16_004 + * @tc.desc: Verify the device is not support float16 precision setting of the SetEnableFp16 function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_enable_fp16_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_enable_fp16_005 + * @tc.desc: Verify the success of the SetEnableFp16 function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_enable_fp16_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_get_input_tensors_001 + * @tc.desc: Verify the normal input tensors of the GetInputTensors function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_get_input_tensors_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + EXPECT_EQ(innerModel.GetInputTensors(), compilationTest.GetInputTensors()); +} + +/* + * @tc.name: compilation_get_output_tensors_001 + * @tc.desc: Verify the normal output tensors of the GetOutputTensors function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_get_output_tensors_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + EXPECT_EQ(innerModel.GetOutputTensors(), compilationTest.GetOutputTensors()); +} + +/* + * @tc.name: compilation_get_execution_plan_001 + * @tc.desc: Verify the passed nullptr of the GetExecutionPlan function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_get_execution_plan_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + EXPECT_EQ(nullptr, compilationTest.GetExecutionPlan()); +} + +/* + * @tc.name: compilation_is_dynamic_shape_001 + * @tc.desc: Verify the input tensor is empth of the IsDynamicShape function. 
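+ *           No model graph has been built yet, so the input tensor list is empty
+ *           and IsDynamicShape is expected to return false.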
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_is_dynamic_shape_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + EXPECT_EQ(false, compilationTest.IsDynamicShape()); +} + +/* + * @tc.name: compilation_is_dynamic_shape_002 + * @tc.desc: Verify the return true of the IsDynamicShape function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_is_dynamic_shape_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + EXPECT_EQ(true, compilationTest.IsDynamicShape()); +} + +/* + * @tc.name: compilation_is_dynamic_shape_003 + * @tc.desc: Verify the return false of the IsDynamicShape function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_is_dynamic_shape_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + EXPECT_EQ(false, compilationTest.IsDynamicShape()); +} + +/* + * @tc.name: compilation_is_build_001 + * @tc.desc: Verify return false of the IsBuild function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_is_build_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + EXPECT_EQ(false, compilationTest.IsBuild()); +} + +/* + * @tc.name: compilation_build_001 + * @tc.desc: Verify the build after compilation finish of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_build_002 + * @tc.desc: Verify the not set device of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_build_003 + * @tc.desc: Verify the preparing model failed of the Build function without set cache path. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_FILE; + SetConfig(compilationTest); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_004 + * @tc.desc: Verify the success of the Build function without set cache path. 
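+ *           No cache directory is configured, so Build presumably prepares the
+ *           model on the mock device directly and writes no *.nncache files.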
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_build_005 + * @tc.desc: Verify the preparing model failed of the Build function without cache file. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_FILE; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_006 + * @tc.desc: Verify the export model cache failed of the Build function without cache file. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_build_007 + * @tc.desc: Verify the model cache file is invalid to generating cache mode of the Build function without cache file. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_007, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("/sys", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_008 + * @tc.desc: Verify the success to generating cache mode of the Build function without cache file. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_008, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_build_009 + * @tc.desc: Verify the Fail to get the content of info cache file of the Build. 
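+ *           A successful build appears to record uint64 fields in cache_info.nncache:
+ *           file number, version, device id, then one entry per cache file (see
+ *           WriteFile above). Here the file is recreated empty, so reading its
+ *           header fails and Build returns OH_NN_INVALID_FILE.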
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_009, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + std::ofstream createFile("cache_info.nncache"); + createFile.close(); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_010 + * @tc.desc: Verify the deviceId in the cache files is different from current deviceId of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_010, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + WriteFile(1, 4, 2); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_011 + * @tc.desc: Verify the info cache file has been changed of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_011, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + WriteFile(1, 100, 1); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_012 + * @tc.desc: Verify the Preparing model failed of the Build function model version is greater than cached versio. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_012, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_FILE; + SetConfig(compilationTest); + WriteFile(0, 4, 1); + + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + std::ofstream inFile("0.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + inFile.close(); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_013 + * @tc.desc: Verify that the build function return success message with model version is greater than cached version + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_013, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + WriteFile(0, 1, 1); + + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + std::ofstream inFile("0.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + inFile.close(); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_build_014 + * @tc.desc: Verify the model version is less than version cache of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_014, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + WriteFile(3, 4, 1); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_build_015 + * @tc.desc: Verify the checking cache model failed of the Build function with release buffer. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_015, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + EXPECT_EQ(0, remove("1.nncache")); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_016 + * @tc.desc: Verify the get cache file length of the Build function. 
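+ *           0.nncache is truncated to zero bytes after the first build, so its
+ *           length no longer matches the recorded cache info and Build is
+ *           expected to fail with OH_NN_INVALID_FILE.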
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_016, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + std::ofstream inFile("0.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + inFile.close(); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_017 + * @tc.desc: Verify the fail to create file buffer of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_017, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_NULL_PTR; + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_NULL_PTR, ret); +} + +/* + * @tc.name: compilation_build_018 + * @tc.desc: Verify the cache model file has been changed of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_018, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + uint64_t version = 1; + uint64_t fileNumber = 1; + std::size_t cacheDeviceId = 1; + uint64_t cacheInfo[7] = {}; + auto cacheInfoPtr = cacheInfo; + *cacheInfoPtr++ = fileNumber; + *cacheInfoPtr++ = version; + *cacheInfoPtr++ = cacheDeviceId; + for (uint64_t i = 0; i < 4; ++i) { + *cacheInfoPtr++ = i; + } + + std::ofstream onFile("0.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + onFile.write(reinterpret_cast(cacheInfo), 7 * sizeof(uint64_t)); + onFile.close(); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_019 + * @tc.desc: Verify the preparing model from cache failed of the Build function with load cache build. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_019, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_build_020 + * @tc.desc: Verify the success of the Build function with load cache build. 
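+ *           The cache produced by BuildCompilation is left intact, so the second
+ *           Build presumably restores the prepared model from 0.nncache/1.nncache
+ *           instead of compiling the graph again.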
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_020, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/compilation/compilation_test.h b/test/unittest/components/compilation/compilation_test.h new file mode 100644 index 0000000..8217f4f --- /dev/null +++ b/test/unittest/components/compilation/compilation_test.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_COMPILATION_UNITTEST_H +#define NEURAL_NETWORK_RUNTIME_COMPILATION_UNITTEST_H + +#include + +#include "frameworks/native/compilation.h" +#include "frameworks/native/inner_model.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class CompilationTest : public testing::Test { +public: + OH_NN_ReturnCode BuildModelGraph(NeuralNetworkRuntime::InnerModel& innerModel); + void SetConfig(Compilation& compilationTest); + void WriteFile(uint64_t version, uint64_t fileNumber, std::size_t cacheDeviceId); + void BuildCompilation(InnerModel& innerModel); +}; +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_COMPILATION_UNITTEST_H \ No newline at end of file diff --git a/test/unittest/components/device_manager/device_manager_test.cpp b/test/unittest/components/device_manager/device_manager_test.cpp new file mode 100644 index 0000000..3e50689 --- /dev/null +++ b/test/unittest/components/device_manager/device_manager_test.cpp @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include "common/log.h" +#include "frameworks/native/device_manager.h" +#include "frameworks/native/hdi_device.h" +#include "test/unittest/common/mock_idevice.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class DeviceManagerTest : public testing::Test { +protected: + void MockInit(OHOS::sptr device, const std::vector& typeVect, + const std::string& deviceName, const std::string& vendorName); +}; + +void DeviceManagerTest::MockInit(OHOS::sptr device, const std::vector& typeVect, + const std::string& deviceName, const std::string& vendorName) +{ + const size_t typeSize = 4; + int index = 0; + EXPECT_EQ(typeSize, typeVect.size()); + EXPECT_CALL(*device, GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), + ::testing::Return(typeVect[index++]))); + + EXPECT_CALL(*device, GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), + ::testing::Return(typeVect[index++]))); + + V1_0::DeviceStatus deviceStatus = V1_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*device, GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceStatus), + ::testing::Return(typeVect[index++]))); + + uint32_t majorVer = 1; + uint32_t minorVer = 0; + EXPECT_CALL(*device, GetVersion(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(majorVer), ::testing::SetArgReferee<1>(minorVer), + ::testing::Return(typeVect[index++]))); +} + +/** + * @tc.name: devicemanager_getalldeviceid_001 + * @tc.desc: Verify the GetAllDeviceId function return deviceid list is not null. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_getalldeviceid_001, TestSize.Level0) +{ + auto &deviceManager = DeviceManager::GetInstance(); + std::vector idVect = deviceManager.GetAllDeviceId(); + EXPECT_NE((size_t)0, idVect.size()); + + const size_t expectDeviceId {std::hash{}("MockDevice_MockVendor")}; + EXPECT_EQ(expectDeviceId, idVect[0]); + + const std::string expectDeviceName = "MockDevice"; + std::string deviceName = ""; + std::shared_ptr retDevice = deviceManager.GetDevice(idVect[0]); + retDevice->GetDeviceName(deviceName); + EXPECT_EQ(deviceName, expectDeviceName); +} + +/** + * @tc.name: devicemanager_getdevice_001 + * @tc.desc: Verify the GetDevice function return nullptr in case of deviceId invalid. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_getdevice_001, TestSize.Level0) +{ + auto &deviceManager = DeviceManager::GetInstance(); + const size_t deviceId = 1; + std::shared_ptr result = deviceManager.GetDevice(deviceId); + EXPECT_EQ(nullptr, result); +} + +/** + * @tc.name: devicemanager_getdevice_002 + * @tc.desc: Verify the GetDevice function validate device name return specified device name. 
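+ *           Only the single device registered by the common mock (mock_idevice.cpp)
+ *           is expected at this point, and it should report the name "MockDevice".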
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_getdevice_002, TestSize.Level0) +{ + auto &deviceManager = DeviceManager::GetInstance(); + std::vector idVect = deviceManager.GetAllDeviceId(); + EXPECT_EQ((size_t)1, idVect.size()); + size_t deviceId = idVect[0]; + std::shared_ptr result = deviceManager.GetDevice(deviceId); + EXPECT_NE(nullptr, result); + + const std::string expectDeviceNameA = "MockDevice"; + std::string deviceName = ""; + result->GetDeviceName(deviceName); + EXPECT_EQ(deviceName, expectDeviceNameA); +} + +/** + * @tc.name: devicemanager_registerdevice_001 + * @tc.desc: Verify the RegisterDevice function register repeatly. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_001, TestSize.Level0) +{ + std::vector typeVect = {HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS}; + OHOS::sptr device = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); + EXPECT_NE(device.GetRefPtr(), nullptr); + + std::string deviceName = "MockDevice"; + std::string vendorName = "MockVendor"; + MockInit(device, typeVect, deviceName, vendorName); + + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; + auto& deviceManager = DeviceManager::GetInstance(); + OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); + EXPECT_EQ(OH_NN_FAILED, result); +} + +/** + * @tc.name: devicemanager_registerdevice_002 + * @tc.desc: Verify the RegisterDevice function return invalid parameter. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_002, TestSize.Level0) +{ + std::function()> creator = + []()->std::shared_ptr {return nullptr;}; + auto& deviceManager = DeviceManager::GetInstance(); + OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: devicemanager_registerdevice_003 + * @tc.desc: Verify the RegisterDevice function return unavailable device in case of device name invalid param. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_003, TestSize.Level0) +{ + std::vector typeVect = {HDF_FAILURE, HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS}; + OHOS::sptr device = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); + EXPECT_NE(device.GetRefPtr(), nullptr); + + std::string deviceName = "MockDevice"; + std::string vendorName = "MockVendor"; + MockInit(device, typeVect, deviceName, vendorName); + + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; + auto& deviceManager = DeviceManager::GetInstance(); + OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/** + * @tc.name: devicemanager_registerdevice_004 + * @tc.desc: Verify the RegisterDevice function return unavailable device in case of vendor name failure. 
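+ *           typeVect = {HDF_SUCCESS, HDF_FAILURE, HDF_SUCCESS, HDF_SUCCESS} makes
+ *           the mocked GetVendorName call fail, so registration is expected to
+ *           report OH_NN_UNAVALIDABLE_DEVICE.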
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_004, TestSize.Level0) +{ + std::vector typeVect = {HDF_SUCCESS, HDF_FAILURE, HDF_SUCCESS, HDF_SUCCESS}; + OHOS::sptr device = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); + EXPECT_NE(device.GetRefPtr(), nullptr); + + std::string deviceName = "MockDevice"; + std::string vendorName = "MockVendor"; + MockInit(device, typeVect, deviceName, vendorName); + + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; + auto& deviceManager = DeviceManager::GetInstance(); + OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/** + * @tc.name: devicemanager_registerdevice_005 + * @tc.desc: Verify the RegisterDevice function return success. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_005, TestSize.Level0) +{ + std::vector typeVect = {HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS}; + OHOS::sptr device = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); + EXPECT_NE(device.GetRefPtr(), nullptr); + + std::string deviceName = "MockDeviceA"; + std::string vendorName = "MockVendorA"; + MockInit(device, typeVect, deviceName, vendorName); + + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; + auto& deviceManager = DeviceManager::GetInstance(); + OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); + EXPECT_EQ(OH_NN_SUCCESS, result); + + std::vector idVect = deviceManager.GetAllDeviceId(); + EXPECT_NE((size_t)0, idVect.size()); + + const size_t expectDeviceId {std::hash{}("MockDeviceA_MockVendorA")}; + EXPECT_EQ(expectDeviceId, idVect[0]); + + const std::string expectDeviceName = "MockDeviceA_MockVendorA"; + const std::string retDeviceName = deviceManager.GetDeviceName(idVect[0]); + EXPECT_EQ(retDeviceName, expectDeviceName); +} + +/** + * @tc.name: devicemanager_getdevicename_001 + * @tc.desc: Verify the GetDevice function return empty string in case of deviceid invalid. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_getdevicename_001, TestSize.Level0) +{ + auto &deviceManager = DeviceManager::GetInstance(); + const size_t deviceId = 1; + std::string result = deviceManager.GetDeviceName(deviceId); + EXPECT_EQ("", result); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/device_registrar/device_registrar_test.cpp b/test/unittest/components/device_registrar/device_registrar_test.cpp new file mode 100644 index 0000000..4f1ec7c --- /dev/null +++ b/test/unittest/components/device_registrar/device_registrar_test.cpp @@ -0,0 +1,264 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include +#include +#include + +#include "common/log.h" +#include "interfaces/oem/cpp_api/device_registrar.h" +#include "frameworks/native/hdi_device.h" +#include "frameworks/native/device_manager.h" +#include "test/unittest/common/mock_idevice.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class IRegisterDevice : public HDI::HdiBase { +public: + DECLARE_HDI_DESCRIPTOR(u"ohos.hdi.nnrt.v1_0.IRegisterDevice"); + + virtual ~IRegisterDevice() = default; + + static sptr Get(bool isStub = false); + static sptr Get(const std::string& serviceName, bool isStub = false); + + virtual int32_t GetDeviceName(std::string& name) = 0; + + virtual int32_t GetVendorName(std::string& name) = 0; + + virtual int32_t GetDeviceType(V1_0::DeviceType& deviceType) = 0; + + virtual int32_t GetDeviceStatus(V1_0::DeviceStatus& status) = 0; + + virtual int32_t GetSupportedOperation(const V1_0::Model& model, std::vector& ops) = 0; + + virtual int32_t IsFloat16PrecisionSupported(bool& isSupported) = 0; + + virtual int32_t IsPerformanceModeSupported(bool& isSupported) = 0; + + virtual int32_t IsPrioritySupported(bool& isSupported) = 0; + + virtual int32_t IsDynamicInputSupported(bool& isSupported) = 0; + + virtual int32_t PrepareModel(const V1_0::Model& model, const V1_0::ModelConfig& config, + sptr& preparedModel) = 0; + + virtual int32_t IsModelCacheSupported(bool& isSupported) = 0; + + virtual int32_t PrepareModelFromModelCache(const std::vector& modelCache, + const V1_0::ModelConfig& config, sptr& preparedModel) = 0; + + virtual int32_t AllocateBuffer(uint32_t length, V1_0::SharedBuffer& buffer) = 0; + + virtual int32_t ReleaseBuffer(const V1_0::SharedBuffer& buffer) = 0; + + virtual int32_t GetVersion(uint32_t& majorVer, uint32_t& minorVer) + { + majorVer = INNRT_DEVICE_MAJOR_VERSION; + minorVer = INNRT_DEVICE_MINOR_VERSION; + return HDF_SUCCESS; + } +}; + +class SimulationDevice : public Device { +public: + explicit SimulationDevice(OHOS::sptr device) {}; + + OH_NN_ReturnCode GetDeviceName(std::string& name) override + { + name = "MockIDeviceA"; + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode GetVendorName(std::string& name) override + { + name = "MockVendorA"; + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override + { + status = DeviceStatus::AVAILABLE; + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, + std::vector& ops) override + { + return OH_NN_SUCCESS; + }; + + OH_NN_ReturnCode IsFloat16PrecisionSupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode IsPerformanceModeSupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode IsPrioritySupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode IsDynamicInputSupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode IsModelCacheSupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + + OH_NN_ReturnCode PrepareModel(std::shared_ptr model, const ModelConfig& config, + std::shared_ptr& preparedModel) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, + std::shared_ptr& preparedModel) override + 
{ + return OH_NN_SUCCESS; + }; + + void *AllocateBuffer(size_t length) override + { + return nullptr; + }; + OH_NN_ReturnCode ReleaseBuffer(const void* buffer) override + { + return OH_NN_SUCCESS; + }; +}; + +class MockIDeviceImp : public IRegisterDevice { +public: + MOCK_METHOD1(GetDeviceName, int32_t(std::string&)); + MOCK_METHOD1(GetVendorName, int32_t(std::string&)); + MOCK_METHOD1(GetDeviceType, int32_t(V1_0::DeviceType&)); + MOCK_METHOD1(GetDeviceStatus, int32_t(V1_0::DeviceStatus&)); + MOCK_METHOD2(GetSupportedOperation, int32_t(const V1_0::Model&, std::vector&)); + MOCK_METHOD1(IsFloat16PrecisionSupported, int32_t(bool&)); + MOCK_METHOD1(IsPerformanceModeSupported, int32_t(bool&)); + MOCK_METHOD1(IsPrioritySupported, int32_t(bool&)); + MOCK_METHOD1(IsDynamicInputSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModel, + int32_t(const V1_0::Model&, const V1_0::ModelConfig&, OHOS::sptr&)); + MOCK_METHOD1(IsModelCacheSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModelFromModelCache, int32_t(const std::vector&, const V1_0::ModelConfig&, + OHOS::sptr&)); + MOCK_METHOD2(AllocateBuffer, int32_t(uint32_t, V1_0::SharedBuffer&)); + MOCK_METHOD1(ReleaseBuffer, int32_t(const V1_0::SharedBuffer&)); + MOCK_METHOD2(GetVersion, int32_t(uint32_t&, uint32_t&)); +}; + +sptr IRegisterDevice::Get(bool isStub) +{ + return IRegisterDevice::Get("device_service", isStub); +} + +sptr IRegisterDevice::Get(const std::string& serviceName, bool isStub) +{ + if (isStub) { + return nullptr; + } + + sptr mockIDevice = sptr(new (std::nothrow) MockIDeviceImp()); + if (mockIDevice.GetRefPtr() == nullptr) { + LOGE("Failed to new MockIDeviceImp object."); + return nullptr; + } + + std::string deviceName = "MockIDeviceA"; + EXPECT_CALL(*((MockIDeviceImp *)mockIDevice.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_SUCCESS))); + + std::string vendorName = "MockVendorA"; + EXPECT_CALL(*((MockIDeviceImp *)mockIDevice.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_SUCCESS))); + + V1_0::DeviceStatus deviceStatus = V1_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*((MockIDeviceImp *)mockIDevice.GetRefPtr()), GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceStatus), ::testing::Return(HDF_SUCCESS))); + return mockIDevice; +} + +class DeviceRegistrarTest : public testing::Test { +public: + DeviceRegistrarTest() = default; + ~DeviceRegistrarTest() = default; +}; + +std::shared_ptr CreateDeviceObjectCallback() +{ + OHOS::sptr device = IRegisterDevice::Get(false); + EXPECT_NE(device, nullptr); + std::shared_ptr m_mockDevice = std::make_unique(device); + return m_mockDevice; +} + +std::shared_ptr CreateNullObjectCallback() +{ + return nullptr; +} + +/* * + * @tc.name: devicemanager_getalldeviceid_001 + * @tc.desc: Verify the Constructor function register object success. 
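+ *           Registering the creator through DeviceRegistrar is expected to make the device discoverable via DeviceManager::GetInstance().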
+ * @tc.type: FUNC
+ */
+HWTEST_F(DeviceRegistrarTest, deviceregistrar_constructor_001, TestSize.Level0)
+{
+    CreateDevice creator = CreateDeviceObjectCallback;
+    std::unique_ptr<DeviceRegistrar> deviceRegister = std::make_unique<DeviceRegistrar>(creator);
+    EXPECT_NE(deviceRegister, nullptr);
+    auto &deviceManager = DeviceManager::GetInstance();
+    std::vector<size_t> idVect = deviceManager.GetAllDeviceId();
+    EXPECT_EQ((size_t)2, idVect.size());
+
+    const size_t expectDeviceId {std::hash<std::string>{}("MockDevice_MockVendor")};
+    EXPECT_EQ(expectDeviceId, idVect[1]);
+
+    const std::string expectDeviceNameA = "MockDevice";
+    std::string deviceName = "";
+    std::shared_ptr<Device> retDevice = deviceManager.GetDevice(idVect[1]);
+    retDevice->GetDeviceName(deviceName);
+    EXPECT_EQ(deviceName, expectDeviceNameA);
+
+    const std::string expectDeviceNameB = "MockDevice_MockVendor";
+    std::string queryDeviceName = deviceManager.GetDeviceName(idVect[1]);
+    EXPECT_EQ(queryDeviceName, expectDeviceNameB);
+}
+
+/* *
+ * @tc.name: deviceregistrar_constructor_002
+ * @tc.desc: Verify the constructor when the registered creator returns a nullptr device; used for branch coverage.
+ * @tc.type: FUNC
+ */
+HWTEST_F(DeviceRegistrarTest, deviceregistrar_constructor_002, TestSize.Level0)
+{
+    CreateDevice creator = CreateNullObjectCallback;
+    std::unique_ptr<DeviceRegistrar> deviceRegister = std::make_unique<DeviceRegistrar>(creator);
+    EXPECT_NE(deviceRegister, nullptr);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/test/unittest/components/executor/executor_test.cpp b/test/unittest/components/executor/executor_test.cpp
new file mode 100644
index 0000000..5d13e51
--- /dev/null
+++ b/test/unittest/components/executor/executor_test.cpp
@@ -0,0 +1,1206 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "executor_test.h" + +#include "common/scoped_trace.h" +#include "frameworks/native/compilation.h" +#include "frameworks/native/inner_model.h" +#include "test/unittest/common/mock_idevice.h" + +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Ops; +using namespace OHOS::HDI::Nnrt::V1_0; +using namespace OHOS::HiviewDFX; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +using NNTensorPtr = std::shared_ptr; + +MSLITE::LiteGraph* ExecutorTest::BuildLiteGraph(const std::vector dim, const std::vector dimOut) +{ + MSLITE::LiteGraph* liteGraph = new (std::nothrow) MSLITE::LiteGraph(); + if (liteGraph == nullptr) { + LOGE("liteGraph build failed"); + return nullptr; + } + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_.emplace_back(0); + liteGraph->output_indices_.emplace_back(1); + const std::vector quant_params; + + for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) { + const std::vector data(36, 1); + void* liteGraphTensor1 = MSLITE::MindIR_Tensor_Create(liteGraph->name_, + MSLITE::DATA_TYPE_FLOAT32, dim, MSLITE::FORMAT_NCHW, data, quant_params); + liteGraph->all_tensors_.emplace_back(liteGraphTensor1); + } + + for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) { + const std::vector dataOut(36, 1); + void* liteGraphTensor2 = MSLITE::MindIR_Tensor_Create(liteGraph->name_, + MSLITE::DATA_TYPE_FLOAT32, dimOut, MSLITE::FORMAT_NCHW, dataOut, quant_params); + liteGraph->all_tensors_.emplace_back(liteGraphTensor2); + } + + return liteGraph; +} + +OH_NN_Tensor ExecutorTest::SetTensor(OH_NN_DataType dataType, uint32_t dimensionCount, const int32_t *dimensions, + const OH_NN_QuantParam *quantParam, OH_NN_TensorType type) +{ + OH_NN_Tensor tensor; + tensor.dataType = dataType; + tensor.dimensionCount = dimensionCount; + tensor.dimensions = dimensions; + tensor.quantParam = quantParam; + tensor.type = type; + + return tensor; +} + +void ExecutorTest::SetMermory(OH_NN_Memory** &memory) +{ + float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)}; + OH_NN_Memory* ptr = &memoryPtr; + memory = &ptr; +} + +/* + * @tc.name: executor_set_input_001 + * @tc.desc: Verify that the SetInput function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_input_002 + * @tc.desc: Verify that the SetInput function returns a failed message with out-of-range index. 
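+ *           The graph built by BuildLiteGraph has a single input, so index 6 is expected to be rejected.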
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_003 + * @tc.desc: Verify that the SetInput function returns a failed message with dynamic shape. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + const int dim = -1; + m_dimensionCount = 1; + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, &dim, nullptr, OH_NN_TENSOR); + size_t length = 1 * sizeof(float); + float data = 0; + void* buffer = &data; + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_004 + * @tc.desc: Verify that the SetInput function returns a failed message with invalid tensor's dataType. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_004, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_INT64, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_005 + * @tc.desc: Verify that the SetInput function returns a failed message with invalid length. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_005, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + size_t length = 1 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_006 + * @tc.desc: Verify that the SetInput function returns a failed message with allocating buffer is unsuccessfully. 
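+ *           MockIPreparedModel::m_ExpectRetCode forces the device-side buffer allocation to fail, so OH_NN_MEMORY_ERROR is expected.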
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_006, testing::ext::TestSize.Level0) +{ + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_set_input_007 + * @tc.desc: Verify that the SetInput function returns a failed message with empty buffer. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_007, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = nullptr; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_set_input_008 + * @tc.desc: Verify that the SetInput function returns a successful message with dataLength <= curBufferLength. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_008, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + float dataArry[15] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}; + void* buffer = dataArry; + size_t length = 9 * sizeof(float); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + + float expectArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* expectBuffer = expectArry; + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, expectBuffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_input_009 + * @tc.desc: Verify that the SetInput function returns a failed message with length less than dataLength. 
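+ *           A 9-float input is bound first; a follow-up SetInput with a 1-float buffer must be rejected.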
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_009, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInputFromMemory(m_index, tensor, memory)); + + float expectData = 0; + void* buffer = &expectData; + size_t length = 1 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_010 + * @tc.desc: Verify that the SetInput function returns a failed message with BuildFromOHNNTensor unsuccessfully. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_010, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_dimensionCount = 0; + OH_NN_Tensor tensor = SetTensor(OH_NN_UNKNOWN, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_011 + * @tc.desc: Verify that the SetInput function returns a successful message with dataLength <= curBufferLength. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_011, testing::ext::TestSize.Level0) +{ + const std::vector expectDim = {3, -1}; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(expectDim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + + const int32_t testDim[2] = {3, 5}; + OH_NN_Tensor expectTensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, testDim, nullptr, OH_NN_TENSOR); + size_t expectLength = 15 * sizeof(float); + float expectArry[15] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}; + void* expectBuffer = expectArry; + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, expectTensor, expectBuffer, expectLength); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_001 + * @tc.desc: Verify that the SetInputFromMemory function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_002 + * @tc.desc: Verify that the SetInputFromMemory function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_003 + * @tc.desc: Verify that the SetInputFromMemory function returns a failed message with dynamic shape. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + const int dim = -1; + OH_NN_Tensor tensor; + tensor.dataType = OH_NN_FLOAT32; + tensor.dimensionCount = 1; + tensor.dimensions = &dim; + tensor.quantParam = nullptr; + tensor.type = OH_NN_TENSOR; + float value = 0; + void* const data = &value; + OH_NN_Memory memory = {data, 1 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_004 + * @tc.desc: Verify that the SetInputFromMemory function returns a failed message with invalid tensor's dataType. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_004, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_INT64, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_005 + * @tc.desc: Verify that the SetInput function returns a failed message with invalid memory.length. 
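+ *           memory.length covers a single float while the 3 x 3 tensor needs nine, so the call is expected to fail.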
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_005, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 1 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_001 + * @tc.desc: Verify that the SetOutput function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_output_002 + * @tc.desc: Verify that the SetOutput function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_003 + * @tc.desc: Verify that the SetOutput function returns a failed message with invalid length. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 2 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_004 + * @tc.desc: Verify that the SetOutput function returns a failed message with allocating buffer is failed. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_004, testing::ext::TestSize.Level0) +{ + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_set_output_005 + * @tc.desc: Verify that the SetOutput function returns a successful message. 
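+ *           After a 9-float output memory is bound, a SetOutput call with a 1-float buffer is expected to fail.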
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_005, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutputFromMemory(m_index, memory)); + + size_t length = 1 * sizeof(float); + float expectData = 0; + void* buffer = &expectData; + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_006 + * @tc.desc: Verify that the SetOutput function returns a successful message with length <= curBufferLength. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_006, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + + float expectDataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* expectBuffer = expectDataArry; + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, expectBuffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_output_007 + * @tc.desc: Verify that the SetOutput function returns a successful message with length > curBufferLength. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_007, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + + size_t expectLength = 15 * sizeof(float); + float expectDataArry[15] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}; + void* expectBuffer = expectDataArry; + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, expectBuffer, expectLength); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_001 + * @tc.desc: Verify that the SetOutputFromMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_002 + * @tc.desc: Verify that the SetOutputFromMemory function returns a failed message with out-of-range index. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_003 + * @tc.desc: Verify that the SetOutputFromMemory function returns a failed message with invalid memory.length. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 0}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_004 + * @tc.desc: Verify that the SetOutputFromMemory function returns a failed message with memory.length < dataLength. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_004, testing::ext::TestSize.Level0) +{ + const std::vector expectDim = {4, 4}; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, expectDim); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_005 + * @tc.desc: Verify that the SetOutputFromMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_005, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_get_output_dimensions_001 + * @tc.desc: Verify that the GetOutputShape function returns a successful message. 
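+ *           Run() is executed first so that GetOutputShape can report the 3 x 3 output shape.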
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_get_output_dimensions_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.Run()); + + int32_t expectDim[2] = {3, 3}; + int32_t* ptr = expectDim; + int32_t** dimensions = &ptr; + uint32_t dimensionCount = 2; + + OH_NN_ReturnCode ret = executorTest.GetOutputShape(m_index, dimensions, dimensionCount); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_get_output_dimensions_002 + * @tc.desc: Verify that the GetOutputShape function returns a failed message without run. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_get_output_dimensions_002, testing::ext::TestSize.Level0) +{ + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + + int32_t testDim[2] = {3, 3}; + int32_t* ptr = testDim; + int32_t** dimensions = &ptr; + uint32_t dimensionCount = 2; + + OH_NN_ReturnCode ret = executorTest.GetOutputShape(m_index, dimensions, dimensionCount); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: executor_get_output_dimensions_003 + * @tc.desc: Verify that the GetOutputShape function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_get_output_dimensions_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.Run()); + + uint32_t testIndex = 6; + int32_t testDim[2] = {3, 3}; + int32_t* ptr = testDim; + int32_t** dimensions = &ptr; + uint32_t dimensionCount = 2; + + OH_NN_ReturnCode ret = executorTest.GetOutputShape(testIndex, dimensions, dimensionCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_create_input_memory_001 + * @tc.desc: Verify that the CreateInputMemory function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_input_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.CreateInputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_create_input_memory_002 + * @tc.desc: Verify that the CreateInputMemory function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_input_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + m_index = 6; + OH_NN_ReturnCode ret = executorTest.CreateInputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_create_input_memory_003 + * @tc.desc: Verify that the CreateInputMemory function returns a failed message with allocating buffer unsuccessfully. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_input_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.CreateInputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_destroy_input_memory_001 + * @tc.desc: Verify that the DestroyInputMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_input_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)}; + OH_NN_Memory* ptr = &memoryPtr; + OH_NN_Memory** memory = &ptr; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateInputMemory(m_index, length, memory)); + OH_NN_ReturnCode ret = executorTest.DestroyInputMemory(m_index, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_destroy_input_memory_002 + * @tc.desc: Verify that the DestroyInputMemory function returns a failed message with out-of-range index. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_input_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + uint32_t testIndex = 6; + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateInputMemory(m_index, length, memory)); + OH_NN_ReturnCode ret = executorTest.DestroyInputMemory(testIndex, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_destroy_input_memory_003 + * @tc.desc: Verify that the DestroyInputMemory function returns a failed message without creating memory. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_input_memory_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.DestroyInputMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_destroy_input_memory_004 + * @tc.desc: Verify that the DestroyInputMemory function returns a failed message with invalid memory.data. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_input_memory_004, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateInputMemory(m_index, length, memory)); + + float arry[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; + void* const expectData = arry; + OH_NN_Memory mptr = {expectData, 9 * sizeof(float)}; + OH_NN_Memory* expectPtr = &mptr; + OH_NN_Memory** expectMemory = &expectPtr; + + OH_NN_ReturnCode ret = executorTest.DestroyInputMemory(m_index, expectMemory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_create_output_memory_001 + * @tc.desc: Verify that the CreateOutputMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_output_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.CreateOutputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_create_output_memory_002 + * @tc.desc: Verify that the CreateOutputMemory function returns a failed message with out-of-range index. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_output_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.CreateOutputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_create_output_memory_003 + * @tc.desc: Verify that the CreateOutputMemory function returns a failed message with allocating buffer unsuccessfully. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_output_memory_003, testing::ext::TestSize.Level0) +{ + HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.CreateOutputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_destroy_output_memory_001 + * @tc.desc: Verify that the DestroyOutputMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_output_memory_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateOutputMemory(m_index, length, memory)); + OH_NN_ReturnCode ret = executorTest.DestroyOutputMemory(m_index, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_destroy_output_memory_002 + * @tc.desc: Verify that the DestroyOutputMemory function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_output_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + uint32_t testIndex = 6; + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateOutputMemory(m_index, length, memory)); + OH_NN_ReturnCode ret = executorTest.DestroyOutputMemory(testIndex, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_destroy_output_memory_003 + * @tc.desc: Verify that the DestroyOutputMemory function returns a failed message without creating memory. 
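+ *           The memory was not obtained from CreateOutputMemory, so DestroyOutputMemory is expected to reject it.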
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_output_memory_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.DestroyOutputMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_destroy_output_memory_004 + * @tc.desc: Verify that the DestroyOutputMemory function returns a failed message with invalid memory.data. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_output_memory_004, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateOutputMemory(m_index, length, memory)); + + float arry[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; + void* const expectData = arry; + OH_NN_Memory mptr = {expectData, 9 * sizeof(float)}; + OH_NN_Memory* expectPtr = &mptr; + OH_NN_Memory** expectMemory = &expectPtr; + + OH_NN_ReturnCode ret = executorTest.DestroyOutputMemory(m_index, expectMemory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_run_test_001 + * @tc.desc: Verify that the Run function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_run_test_001, testing::ext::TestSize.Level0) +{ + HiviewDFX::HiTraceId traceId = HiTraceChain::Begin("executor_run_test_001", HITRACE_FLAG_TP_INFO); + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + OH_NN_ReturnCode ret = executorTest.Run(); + HiTraceChain::End(traceId); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_run_test_002 + * @tc.desc: Verify that the DestroyOutputMemory function returns a failed message without SetInput. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_run_test_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_ReturnCode ret = executorTest.Run(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_run_test_003 + * @tc.desc: Verify that the DestroyOutputMemory function returns a failed message without SetOutput. 
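+ *           This case exercises Executor::Run: no output buffer is set, so Run returns OH_NN_INVALID_PARAMETER.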
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, executor_run_test_003, testing::ext::TestSize.Level0)
+{
+    const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut);
+    InnerModel innerModel;
+    innerModel.BuildFromLiteGraph(liteGraphTest);
+    Compilation compilation(&innerModel);
+    Executor executorTest(&compilation);
+
+    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
+    void* buffer = m_dataArry;
+    size_t length = 9 * sizeof(float);
+
+    EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length));
+    OH_NN_ReturnCode ret = executorTest.Run();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: executor_run_test_004
+ * @tc.desc: Verify that the Run function returns a failed message when the execution plan run fails.
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, executor_run_test_004, testing::ext::TestSize.Level0)
+{
+    HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED;
+    const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut);
+    InnerModel innerModel;
+    innerModel.BuildFromLiteGraph(liteGraphTest);
+    Compilation compilation(&innerModel);
+    Executor executorTest(&compilation);
+
+    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
+    void* buffer = m_dataArry;
+    size_t length = 9 * sizeof(float);
+
+    EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length));
+    EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length));
+    OH_NN_ReturnCode ret = executorTest.Run();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/test/unittest/components/executor/executor_test.h b/test/unittest/components/executor/executor_test.h
new file mode 100644
index 0000000..05837b5
--- /dev/null
+++ b/test/unittest/components/executor/executor_test.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NEURAL_NETWORK_RUNTIME_EXECUTOR_UNITTEST_H
+#define NEURAL_NETWORK_RUNTIME_EXECUTOR_UNITTEST_H
+
+#include <gtest/gtest.h>
+
+#include "mindir.h"
+
+#include "frameworks/native/executor.h"
+
+namespace MSLITE = mindspore::lite;
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class ExecutorTest : public testing::Test {
+public:
+    MSLITE::LiteGraph* BuildLiteGraph(const std::vector<int32_t> dim, const std::vector<int32_t> dimOut);
+    OH_NN_Tensor SetTensor(OH_NN_DataType dataType, uint32_t dimensionCount, const int32_t *dimensions,
+        const OH_NN_QuantParam *quantParam, OH_NN_TensorType type);
+    void SetMermory(OH_NN_Memory** &memory);
+
+public:
+    uint32_t m_index {0};
+    const std::vector<int32_t> m_dim {3, 3};
+    const std::vector<int32_t> m_dimOut {3, 3};
+    const int32_t m_dimArry[2] {3, 3};
+    uint32_t m_dimensionCount {2};
+    float m_dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+};
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
+
+#endif // NEURAL_NETWORK_RUNTIME_EXECUTOR_UNITTEST_H
\ No newline at end of file
diff --git a/test/unittest/components/hdi_device/hdi_device_test.cpp b/test/unittest/components/hdi_device/hdi_device_test.cpp
new file mode 100644
index 0000000..07925bf
--- /dev/null
+++ b/test/unittest/components/hdi_device/hdi_device_test.cpp
@@ -0,0 +1,875 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "frameworks/native/hdi_device.h" +#include "test/unittest/common/mock_idevice.h" +#include "test/unittest/common/file_utils.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace mindspore { +namespace lite { +OHOS::HDI::Nnrt::V1_0::Model* MindIR_LiteGraph_To_Model(const LiteGraph* lite_graph, + const OHOS::HDI::Nnrt::V1_0::SharedBuffer& buffer) +{ + return new (std::nothrow) OHOS::HDI::Nnrt::V1_0::Model(); +} + +void MindIR_Model_Destroy(OHOS::HDI::Nnrt::V1_0::Model** model) +{ + if ((model != nullptr) && (*model != nullptr)) { + delete *model; + *model = nullptr; + } +} + +size_t MindIR_LiteGraph_GetConstTensorSize(const mindspore::lite::LiteGraph* lite_graph) +{ + return 1; +} +} +} + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class HDIDeviceTest : public testing::Test { +protected: + void GetBuffer(void*& buffer, size_t length); + OH_NN_ReturnCode PrepareModel(int32_t allocBufferType, int32_t prepareType); +}; + +void HDIDeviceTest::GetBuffer(void*& buffer, size_t length) +{ + std::string data = "ABCD"; + const size_t dataLength = 100; + data.resize(dataLength, '+'); + + std::string filename = "/data/log/memory-001.dat"; + FileUtils fileUtils(filename); + fileUtils.WriteFile(data); + + int fd = open(filename.c_str(), O_RDWR); + EXPECT_NE(fd, -1); + + const auto &memoryManager = MemoryManager::GetInstance(); + buffer = memoryManager->MapMemory(fd, length); + EXPECT_NE(buffer, nullptr); + + const char* result = static_cast(buffer); + int index = 0; + EXPECT_EQ('A', result[index++]); + EXPECT_EQ('B', result[index++]); + EXPECT_EQ('C', result[index++]); + EXPECT_EQ('D', result[index++]); + close(fd); +} + +OH_NN_ReturnCode HDIDeviceTest::PrepareModel(int32_t allocBufferType, int32_t prepareType) +{ + std::shared_ptr model = std::make_shared(); + OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr hdiDevice = std::make_unique(sp); + EXPECT_NE(hdiDevice, nullptr); + + V1_0::SharedBuffer buffer {1, 1, 0, 1}; + EXPECT_CALL(*sp, AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(allocBufferType))); + + std::shared_ptr preparedModel; + const int position = 2; + OHOS::sptr iPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_CALL(*sp, PrepareModel(::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee(iPreparedModel), + ::testing::Return(prepareType))); + + ModelConfig config; + OH_NN_ReturnCode result = hdiDevice->PrepareModel(model, config, preparedModel); + return result; +} + +/* * + * @tc.name: hdidevice_constructor_001 + * @tc.desc: Verify the Constructor function return object success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_constructor_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + EXPECT_NE(device, nullptr); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); +} + +/* * + * @tc.name: hdidevice_getdevicename_001 + * @tc.desc: Verify the GetDeviceName function validate device name success. 
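+ *           The mock INnrtDevice reports "MockDevice" with HDF_SUCCESS, which HDIDevice is expected to forward unchanged.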
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicename_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string deviceName = "MockDevice"; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_SUCCESS))); + + const std::string expectDeviceName = "MockDevice"; + std::string newDeviceName = ""; + OH_NN_ReturnCode result = hdiDevice->GetDeviceName(newDeviceName); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceName, newDeviceName); +} + +/* * + * @tc.name: hdidevice_getdevicename_002 + * @tc.desc: Verify the GetDeviceName function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicename_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string deviceName = "MockDevice"; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetDeviceName(deviceName); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_getvendorname_001 + * @tc.desc: Verify the GetVendorName function validate vendor name success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string vendorName = "MockVendor"; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_SUCCESS))); + + const std::string expectDeviceName = "MockVendor"; + std::string newVendorName = ""; + OH_NN_ReturnCode result = hdiDevice->GetVendorName(newVendorName); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceName, newVendorName); +} + +/* * + * @tc.name: hdidevice_getvendorname_002 + * @tc.desc: Verify the GetVendorName function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string vendorName = "MockVendor"; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetVendorName(vendorName); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_getdevicetype_001 + * @tc.desc: Verify the GetDeviceType function validate device type success. 
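+ *           The HDI DeviceType::CPU value is expected to be mapped to OH_NN_CPU.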
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + V1_0::DeviceType iDeviceType = V1_0::DeviceType::CPU; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceType), ::testing::Return(HDF_SUCCESS))); + + OH_NN_DeviceType expectDeviceType = OH_NN_CPU; + OH_NN_DeviceType newDeviceType = OH_NN_CPU; + OH_NN_ReturnCode result = hdiDevice->GetDeviceType(newDeviceType); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceType, newDeviceType); +} + +/* * + * @tc.name: hdidevice_getdevicetype_002 + * @tc.desc: Verify the GetDeviceType function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + OH_NN_DeviceType deviceType = OH_NN_CPU; + V1_0::DeviceType iDeviceType = V1_0::DeviceType::CPU; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceType), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetDeviceType(deviceType); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_getdevicestatus_001 + * @tc.desc: Verify the GetDeviceStatus function validate device status success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceStatus), ::testing::Return(HDF_SUCCESS))); + + const DeviceStatus expectDeviceStatus = AVAILABLE; + DeviceStatus newDeviceStatus = AVAILABLE; + OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(newDeviceStatus); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceStatus, newDeviceStatus); +} + +/* * + * @tc.name: hdidevice_getdevicestatus_002 + * @tc.desc: Verify the GetDeviceStatus function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + DeviceStatus deviceStatus = AVAILABLE; + V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceStatus), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(deviceStatus); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_getsupportedoperation_001 + * @tc.desc: Verify the GetSupportedOperation function return success. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_001, TestSize.Level0) +{ + std::vector ops {true}; + std::shared_ptr model = std::make_shared(); + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V1_0::SharedBuffer buffer {1, 1, 0, 1}; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_SUCCESS))); + + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetSupportedOperation(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(ops), ::testing::Return(HDF_SUCCESS))); + + std::vector newOps {true}; + const std::vector expectOps {true}; + OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, newOps); + EXPECT_EQ(OH_NN_SUCCESS, result); + auto expectOpsSize = expectOps.size(); + for (size_t i = 0; i < expectOpsSize; ++i) { + EXPECT_EQ(expectOps[i], newOps[i]); + } +} + +/* * + * @tc.name: hdidevice_getsupportedoperation_002 + * @tc.desc: Verify the GetSupportedOperation function return failed in case of allocate buffer failure. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_002, TestSize.Level0) +{ + std::vector ops; + std::shared_ptr model = std::make_shared(); + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V1_0::SharedBuffer buffer {1, 1, 0, 1}; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_FAILURE))); + + OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, ops); + EXPECT_EQ(OH_NN_FAILED, result); +} + +/* * + * @tc.name: hdidevice_getsupportedoperation_003 + * @tc.desc: Verify the GetSupportedOperation function return nullptr. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_003, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + std::shared_ptr model = nullptr; + std::vector ops; + OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, ops); + EXPECT_EQ(OH_NN_NULL_PTR, result); +} + +/* * + * @tc.name: hdidevice_getsupportedoperation_004 + * @tc.desc: Verify the GetSupportedOperation function return unavalidable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_004, TestSize.Level0) +{ + std::vector ops {true}; + std::shared_ptr model = std::make_shared(); + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V1_0::SharedBuffer buffer {2, 1, 0, 1}; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_SUCCESS))); + + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetSupportedOperation(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(ops), ::testing::Return(HDF_FAILURE))); + + std::vector newOps {true}; + OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, newOps); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_isfloat16precisionsupported_001 + * @tc.desc: Verify the IsFloat16PrecisionSupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isfloat16precisionsupported_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsFloat16PrecisionSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + OH_NN_ReturnCode result = hdiDevice->IsFloat16PrecisionSupported(isSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); +} + +/* * + * @tc.name: hdidevice_isfloat16precisionsupported_002 + * @tc.desc: Verify the IsFloat16PrecisionSupported function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isfloat16precisionsupported_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsFloat16PrecisionSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsFloat16PrecisionSupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_isperformancemodesupported_001 + * @tc.desc: Verify the IsPerformanceModeSupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isperformancemodesupported_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsPerformanceModeSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + + bool newIsSupported = false; + const bool expectIsSupported = false; + OH_NN_ReturnCode result = hdiDevice->IsPerformanceModeSupported(newIsSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectIsSupported, newIsSupported); +} + +/* * + * @tc.name: hdidevice_isperformancemodesupported_002 + * @tc.desc: Verify the IsPerformanceModeSupported function return unavailable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isperformancemodesupported_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsPerformanceModeSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsPerformanceModeSupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_isprioritysupported_001 + * @tc.desc: Verify the IsPrioritySupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isprioritysupported_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsPrioritySupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + + bool newIsSupported = false; + bool expectIsSupported = false; + OH_NN_ReturnCode result = hdiDevice->IsPrioritySupported(newIsSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(newIsSupported, expectIsSupported); +} + +/* * + * @tc.name: hdidevice_isprioritysupported_002 + * @tc.desc: Verify the IsPrioritySupported function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isprioritysupported_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsPrioritySupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsPrioritySupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_isdynamicinputsupported_001 + * @tc.desc: Verify the IsDynamicInputSupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isdynamicinputsupported_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsDynamicInputSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + + bool newIsSupported = false; + bool expectIsSupported = false; + OH_NN_ReturnCode result = hdiDevice->IsDynamicInputSupported(newIsSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(newIsSupported, expectIsSupported); +} + +/* * + * @tc.name: hdidevice_isdynamicinputsupported_002 + * @tc.desc: Verify the IsDynamicInputSupported function return unavailable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isdynamicinputsupported_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsDynamicInputSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsDynamicInputSupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_ismodelcachesupported_001 + * @tc.desc: Verify the IsModelCacheSupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_ismodelcachesupported_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsModelCacheSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + + bool newIsSupported = false; + bool expectIsSupported = false; + OH_NN_ReturnCode result = hdiDevice->IsModelCacheSupported(newIsSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectIsSupported, newIsSupported); +} + +/* * + * @tc.name: hdidevice_ismodelcachesupported_002 + * @tc.desc: Verify the IsModelCacheSupported function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_ismodelcachesupported_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), IsModelCacheSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsModelCacheSupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_preparemodel_001 + * @tc.desc: Verify the PrepareModel function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_001, TestSize.Level0) +{ + int32_t allocBufferType = HDF_SUCCESS; + int32_t prepareType = HDF_SUCCESS; + OH_NN_ReturnCode result = PrepareModel(allocBufferType, prepareType); + EXPECT_EQ(OH_NN_SUCCESS, result); +} + +/* * + * @tc.name: hdidevice_preparemodel_002 + * @tc.desc: Verify the PrepareModel function return invalid parameter. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + std::shared_ptr model = nullptr; + ModelConfig config; + std::shared_ptr preparedModel; + OH_NN_ReturnCode result = hdiDevice->PrepareModel(model, config, preparedModel); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/* * + * @tc.name: hdidevice_preparemodel_003 + * @tc.desc: Verify the PrepareModel function return failed. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_003, TestSize.Level0) +{ + int32_t allocBufferType = HDF_SUCCESS; + int32_t prepareType = HDF_FAILURE; + OH_NN_ReturnCode result = PrepareModel(allocBufferType, prepareType); + EXPECT_EQ(OH_NN_FAILED, result); +} + +/* * + * @tc.name: hdidevice_preparemodel_004 + * @tc.desc: Verify the PrepareModel function return failed. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_004, TestSize.Level0) +{ + int32_t allocBufferType = HDF_FAILURE; + int32_t prepareType = HDF_FAILURE; + OH_NN_ReturnCode result = PrepareModel(allocBufferType, prepareType); + EXPECT_EQ(OH_NN_FAILED, result); +} + +/* * + * @tc.name: hdidevice_preparemodelfrommodelcache_001 + * @tc.desc: Verify the PrepareModelFromModelCache function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_001, TestSize.Level0) +{ + size_t length = 100; + void *buffer = nullptr; + GetBuffer(buffer, length); + + std::vector modelCache = { { buffer, 100 } }; + ModelConfig config; + + OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr hdiDevice = std::make_unique(sp); + EXPECT_NE(hdiDevice, nullptr); + + std::shared_ptr preparedModel; + + OHOS::sptr iPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_SUCCESS))); + + OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel); + const auto &memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); + EXPECT_EQ(OH_NN_SUCCESS, result); +} + +/* * + * @tc.name: hdidevice_preparemodelfrommodelcache_002 + * @tc.desc: Verify the PrepareModelFromModelCache function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_002, TestSize.Level0) +{ + size_t length = 100; + void *buffer = nullptr; + GetBuffer(buffer, length); + + OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr hdiDevice = std::make_unique(sp); + EXPECT_NE(hdiDevice, nullptr); + + std::vector modelCache = { { buffer, 100 } }; + ModelConfig config; + OHOS::sptr preModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(preModel, nullptr); + + std::shared_ptr preparedModel = std::make_shared(preModel); + + OHOS::sptr iPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel); + EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_FAILURE))); + + OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_preparemodelfrommodelcache_003 + * @tc.desc: Verify the PrepareModelFromModelCache function return nullptr. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_003, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + std::vector modelCache = { { nullptr, 0 } }; + ModelConfig config; + std::shared_ptr preparedModel; + OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel); + EXPECT_EQ(OH_NN_NULL_PTR, result); +} + +/* * + * @tc.name: hdidevice_allocatebuffer_001 + * @tc.desc: Verify the AllocateBuffer function return nullptr. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_001, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V1_0::SharedBuffer buffer; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_FAILURE))); + + size_t length = 8; + void *result = hdiDevice->AllocateBuffer(length); + EXPECT_EQ(nullptr, result); + hdiDevice->ReleaseBuffer(result); +} + +/* * + * @tc.name: hdidevice_allocatebuffer_002 + * @tc.desc: Verify the AllocateBuffer function return nullptr and HDF_FAILURE. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + size_t length = 8; + void *result = hdiDevice->AllocateBuffer(length); + EXPECT_EQ(nullptr, result); + hdiDevice->ReleaseBuffer(result); +} + +/* * + * @tc.name: hdidevice_allocatebuffer_003 + * @tc.desc: Verify the AllocateBuffer function return nullptr in case of 0 size. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_003, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + size_t length = 0; + void *result = hdiDevice->AllocateBuffer(length); + EXPECT_EQ(nullptr, result); +} + +/* * + * @tc.name: hdidevice_releasebuffer_001 + * @tc.desc: Verify the ReleaseBuffer function validate buffer success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_001, TestSize.Level0) +{ + size_t length = 100; + void *buffer = nullptr; + GetBuffer(buffer, length); + + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_)) + .WillRepeatedly(::testing::Return(HDF_SUCCESS)); + + EXPECT_NE(hdiDevice, nullptr); + hdiDevice->ReleaseBuffer(buffer); + const auto &memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} + +/* * + * @tc.name: hdidevice_releasebuffer_002 + * @tc.desc: Verify the ReleaseBuffer function validate AllocateBuffer return nullptr. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_002, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V1_0::SharedBuffer sharedbuffer; + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(sharedbuffer), ::testing::Return(HDF_FAILURE))); + + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_)) + .WillRepeatedly(::testing::Return(HDF_FAILURE)); + + size_t length = 8; + void *buffer = hdiDevice->AllocateBuffer(length); + hdiDevice->ReleaseBuffer(buffer); +} + +/* * + * @tc.name: hdidevice_releasebuffer_003 + * @tc.desc: Verify the ReleaseBuffer function validate param buffer is nullptr. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_003, TestSize.Level0) +{ + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + void *buffer = nullptr; + hdiDevice->ReleaseBuffer(buffer); +} + +/* * + * @tc.name: hdidevice_releasebuffer_004 + * @tc.desc: Verify the ReleaseBuffer function validate invalid buffer. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_004, TestSize.Level0) +{ + const size_t length = 100; + auto* buffer = new(std::nothrow) char[length]; + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + hdiDevice->ReleaseBuffer(buffer); + delete[] buffer; + buffer = nullptr; +} + +/* * + * @tc.name: hdidevice_releasebuffer_005 + * @tc.desc: Verify the ReleaseBuffer function validate moc object's ReleaseBuffer return failure. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_005, TestSize.Level0) +{ + size_t length = 100; + void *buffer = nullptr; + GetBuffer(buffer, length); + + OHOS::sptr device = V1_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_)) + .WillRepeatedly(::testing::Return(HDF_FAILURE)); + + hdiDevice->ReleaseBuffer(buffer); + const auto &memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/hdi_prepared_model/hdi_prepared_model_test.cpp b/test/unittest/components/hdi_prepared_model/hdi_prepared_model_test.cpp new file mode 100644 index 0000000..d946b63 --- /dev/null +++ b/test/unittest/components/hdi_prepared_model/hdi_prepared_model_test.cpp @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include +#include + +#include "common/log.h" +#include "frameworks/native/hdi_prepared_model.h" +#include "frameworks/native/memory_manager.h" +#include "frameworks/native/transform.h" +#include "test/unittest/common/mock_idevice.h" +#include "test/unittest/common/file_utils.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class HDIPreparedModelTest : public testing::Test { +protected: + void GetBuffer(void*& buffer, size_t length); + void InitTensor(std::vector& inputs, void* buffer, size_t length); + OH_NN_ReturnCode Run(std::vector& inputs); +}; + +void HDIPreparedModelTest::GetBuffer(void*& buffer, size_t length) +{ + std::string data = "ABCD"; + const size_t dataLength = 100; + data.resize(dataLength, '-'); + + std::string filename = "/data/log/memory-001.dat"; + FileUtils fileUtils(filename); + fileUtils.WriteFile(data); + + int fd = open(filename.c_str(), O_RDWR); + EXPECT_NE(-1, fd); + + const auto& memoryManager = MemoryManager::GetInstance(); + buffer = memoryManager->MapMemory(fd, length); + close(fd); +} + +void HDIPreparedModelTest::InitTensor(std::vector& inputs, void* buffer, size_t length) +{ + IOTensor inputTensor; + inputTensor.dataType = OH_NN_INT8; + inputTensor.dataType = OH_NN_INT8; + inputTensor.format = OH_NN_FORMAT_NCHW; + inputTensor.data = buffer; + inputTensor.length = length; + inputs.emplace_back(std::move(inputTensor)); +} + +OH_NN_ReturnCode HDIPreparedModelTest::Run(std::vector& inputs) +{ + const int vvPosition = 2; + const int vPosition = 3; + std::vector outputs; + std::vector> outputsDims {{0}}; + std::vector isOutputBufferEnough {}; + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + EXPECT_CALL(*sp, Run(::testing::_, ::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll( + ::testing::SetArgReferee(outputsDims), + ::testing::SetArgReferee(isOutputBufferEnough), + ::testing::Return(HDF_SUCCESS)) + ); + + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + return result; +} + +/** + * @tc.name: hidpreparedmodel_constructor_001 + * @tc.desc: Verify the Constructor function validate constructor success. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_constructor_001, TestSize.Level0) +{ + OHOS::sptr hdiPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(hdiPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + EXPECT_NE(preparedModel, nullptr); +} + +/** + * @tc.name: hidpreparedmodel_exportmodelcache_001 + * @tc.desc: Verify the ExportModelCache function return memory error. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_001, TestSize.Level0) +{ + std::vector bufferVect = {{100, 100, 0, 100}}; + OHOS::sptr hdiPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + std::vector modelCache; + EXPECT_CALL(*((V1_0::MockIPreparedModel*)hdiPreparedModel.GetRefPtr()), + ExportModelCache(::testing::_)) + .WillRepeatedly( + ::testing::DoAll( + ::testing::SetArgReferee<0>(bufferVect), + ::testing::Return(HDF_SUCCESS) + ) + ); + + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_MEMORY_ERROR, result); +} + +/** + * @tc.name: hidpreparedmodel_exportmodelcache_002 + * @tc.desc: Verify the ExportModelCache function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_002, TestSize.Level0) +{ + std::vector bufferVect; + OHOS::sptr mockPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(mockPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); + std::vector modelCache; + EXPECT_CALL(*((V1_0::MockIPreparedModel*)mockPreparedModel.GetRefPtr()), + ExportModelCache(::testing::_)) + .WillRepeatedly( + ::testing::DoAll( + ::testing::SetArgReferee<0>(bufferVect), + ::testing::Return(HDF_SUCCESS) + ) + ); + + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_SUCCESS, result); +} + +/** + * @tc.name: hidpreparedmodel_exportmodelcache_003 + * @tc.desc: Verify the ExportModelCache function return invalid parameter. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_003, TestSize.Level0) +{ + OHOS::sptr hdiPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(hdiPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + std::vector modelCache {{nullptr, 0}}; + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_exportmodelcache_004 + * @tc.desc: Verify the ExportModelCache function return unvailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_004, TestSize.Level0) +{ + std::vector bufferVect = {{100, 100, 0, 100}}; + OHOS::sptr mockPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(mockPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); + std::vector modelCache; + EXPECT_CALL(*((V1_0::MockIPreparedModel*)mockPreparedModel.GetRefPtr()), + ExportModelCache(::testing::_)) + .WillRepeatedly( + ::testing::DoAll( + ::testing::SetArgReferee<0>(bufferVect), + ::testing::Return(HDF_FAILURE) + ) + ); + + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/** + * @tc.name: hidpreparedmodel_run_001 + * @tc.desc: Verify the Run function return invalid parameter. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_001, TestSize.Level0) +{ + IOTensor inputTensor; + inputTensor.dataType = OH_NN_INT8; + + IOTensor outputTensor; + outputTensor.dataType = OH_NN_INT8; + std::vector inputs; + inputs.emplace_back(std::move(inputTensor)); + std::vector outputs; + + std::vector iOutputTensors; + V1_0::IOTensor iTensor; + iOutputTensors.emplace_back(iTensor); + std::vector> outputsDims {{0}}; + std::vector isOutputBufferEnough {}; + + std::shared_ptr sp = std::make_shared(); + OHOS::sptr hdiPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(hdiPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_002 + * @tc.desc: Verify the Run function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_002, TestSize.Level0) +{ + const size_t length = 100; + void* buffer = nullptr; + GetBuffer(buffer, length); + + std::vector inputs; + std::vector outputs; + InitTensor(inputs, buffer, length); + + OH_NN_ReturnCode result = Run(inputs); + EXPECT_EQ(OH_NN_SUCCESS, result); + const auto& memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} + +/** + * @tc.name: hidpreparedmodel_run_003 + * @tc.desc: Verify the Run function return unavailable device in case of run failure. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_003, TestSize.Level0) +{ + const size_t length = 100; + void* buffer = nullptr; + GetBuffer(buffer, length); + + std::vector inputs; + std::vector outputs; + InitTensor(inputs, buffer, length); + + std::vector> outputsDims {}; + std::vector isOutputBufferEnough {}; + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + + EXPECT_CALL(*sp, Run(::testing::_, ::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly( + ::testing::DoAll( + ::testing::SetArgReferee<2>(outputsDims), + ::testing::SetArgReferee<3>(isOutputBufferEnough), + ::testing::Return(HDF_FAILURE) + ) + ); + + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); + const auto& memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} + +/** + * @tc.name: hidpreparedmodel_run_004 + * @tc.desc: Verify the Run function return invalid parameter. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_004, TestSize.Level0) +{ + std::vector inputs; + InitTensor(inputs, nullptr, 0); + OH_NN_ReturnCode result = Run(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_005 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_005, TestSize.Level0) +{ + const size_t length = 100; + void* buffer = nullptr; + GetBuffer(buffer, length); + + std::vector inputs; + std::vector outputs; + InitTensor(inputs, buffer, length); + InitTensor(outputs, nullptr, 0); + + std::vector> outputsDims {}; + std::vector isOutputBufferEnough {}; + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); + const auto& memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/inner_model/inner_model_test.cpp b/test/unittest/components/inner_model/inner_model_test.cpp new file mode 100644 index 0000000..e40c042 --- /dev/null +++ b/test/unittest/components/inner_model/inner_model_test.cpp @@ -0,0 +1,825 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "common/utils.h" +#include "common/log.h" +#include "frameworks/native/nn_tensor.h" +#include "frameworks/native/inner_model.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; + +namespace NNRT { +namespace UnitTest { +class InnerModelTest : public testing::Test { +public: + void SetLiteGraph(mindspore::lite::LiteGraph* liteGraph); + void SetTensors(); + void SetIndices(); + +public: + InnerModel m_innerModelTest; + + std::vector m_dimInput{3, 3}; + std::vector m_dimOutput{3, 3}; + std::vector m_inputIndices{0}; + std::vector m_outputIndices{1}; + + OH_NN_OperationType m_opType{OH_NN_OPS_ADD}; + + OH_NN_UInt32Array m_inputs; + OH_NN_UInt32Array m_outputs; + OH_NN_UInt32Array m_params; + + uint32_t m_paramIndexs[1]{3}; + uint32_t m_inputIndexs[2]{0, 1}; + uint32_t m_outputIndexs[1]{2}; +}; + +void InnerModelTest::SetLiteGraph(mindspore::lite::LiteGraph* liteGraph) +{ + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_ = m_inputIndices; + liteGraph->output_indices_ = m_outputIndices; + + const std::vector quant_params {}; + + for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) { + const std::vector data(36, 1); + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, m_dimInput, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) { + const std::vector dataOut(36, 1); + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, m_dimOutput, mindspore::lite::FORMAT_NCHW, dataOut, 
quant_params)); + } +} + +void InnerModelTest::SetTensors() +{ + const int dim[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + + const OH_NN_Tensor& tensorParam = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_ADD_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensorParam)); +} + +void InnerModelTest::SetIndices() +{ + m_params.data = m_paramIndexs; + m_params.size = sizeof(m_paramIndexs) / sizeof(uint32_t); + + m_inputs.data = m_inputIndexs; + m_inputs.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + + m_outputs.data = m_outputIndexs; + m_outputs.size = sizeof(m_outputIndexs) / sizeof(uint32_t); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_001 + * @tc.desc: Verify the input_indices is empty of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_001, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_inputIndices = {}; + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_002 + * @tc.desc: Verify the input_indices is out of bounds of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_002, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_inputIndices = {6}; + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_003 + * @tc.desc: Verify the success of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_003, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_004 + * @tc.desc: Verify the nntensor build failed nullptr return of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_004, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_dimInput = {3, -3}; + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_NULL_PTR, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_005 + * @tc.desc: Verify the output indices out of bounds of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_005, TestSize.Level1) +{ + mindspore::lite::LiteGraph* 
liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_outputIndices = {6}; + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + +/** + * @tc.name: inner_model_build_from_lite_graph_001 + * @tc.desc: Verify the litegraph is nullptr of the build_from_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_from_lite_graph_001, TestSize.Level1) +{ + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.BuildFromLiteGraph(nullptr)); +} + +/** + * @tc.name: inner_model_build_from_lite_graph_002 + * @tc.desc: Verify the buildfromlitegraph twice forbidden of the build_from_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_from_lite_graph_002, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.BuildFromLiteGraph(liteGraph)); +} + +/** + * @tc.name: inner_model_build_from_lite_graph_003 + * @tc.desc: Verify the litegraph->alltensors is empty of the build_from_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_from_lite_graph_003, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_ = {0}; + liteGraph->output_indices_ = {1}; + + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + + +/** + * @tc.name: inner_model_add_tensor_001 + * @tc.desc: Verify the success of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_tensor_001, TestSize.Level1) +{ + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); +} + +/** + * @tc.name: inner_model_add_tensor_002 + * @tc.desc: Verify the addtensor after buildfromlitegraph of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_tensor_002, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.AddTensor(tensor)); +} + +/** + * @tc.name: inner_model_add_tensor_003 + * @tc.desc: Verify the buildfromnntensor failed of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_tensor_003, TestSize.Level1) +{ + const int32_t dimInput[2] = {2, -2}; + const OH_NN_Tensor& tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddTensor(tensor)); +} + + +/** + * @tc.name: 
inner_model_set_tensor_value_001 + * @tc.desc: Verify the success of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_001, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); +} + +/** + * @tc.name: inner_model_set_tensor_value_002 + * @tc.desc: Verify the index out of bounds of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_002, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 6; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); +} + +/** + * @tc.name: inner_model_set_tensor_value_003 + * @tc.desc: Verify the buffer value is nullptr of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_003, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + nullptr, sizeof(activation))); +} + +/** + * @tc.name: inner_model_set_tensor_value_004 + * @tc.desc: Verify the length invalid of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_004, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), 0)); +} + +/** + * @tc.name: inner_model_set_tensor_value_005 + * @tc.desc: Verify the after buildgraph of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_005, TestSize.Level1) +{ + uint32_t index = 3; + const int8_t activation = 0; + + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); +} + +/** + * @tc.name: inner_model_set_tensor_value_006 + * @tc.desc: Verify the set value twice of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_006, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); +} + +/** + * @tc.name: inner_model_set_tensor_value_007 + * @tc.desc: Verify the tensor dynamicShape of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_007, TestSize.Level1) +{ + const int32_t dimInput[2] = {2, -1}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dimInput, nullptr, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + const OH_NN_Tensor& tensorParam = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_ADD_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensorParam)); + 
+ uint32_t index = 0; + float x[4] = {0, 1, 2, 3}; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.SetTensorValue(index, + x, sizeof(x)- 1)); +} + +/** + * @tc.name: inner_model_add_operation_001 + * @tc.desc: Verify the success of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_001, TestSize.Level1) +{ + SetIndices(); + + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_002 + * @tc.desc: Verify the after buildgraph of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_002, TestSize.Level1) +{ + OH_NN_OperationType m_opType = OH_NN_OPS_ADD; + OH_NN_UInt32Array m_inputs; + OH_NN_UInt32Array m_outputs; + OH_NN_UInt32Array m_params; + + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, + m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_003 + * @tc.desc: Verify the without set buffer of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_003, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_004 + * @tc.desc: Verify the output indices equal to input indices of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_004, TestSize.Level1) +{ + m_outputIndexs[0] = 0; + + SetIndices(); + SetTensors(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_005 + * @tc.desc: Verify the optype invalid of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_005, TestSize.Level1) +{ + m_opType = OH_NN_OperationType(99); + + SetIndices(); + SetTensors(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_006 + * @tc.desc: Verify the input indices out of bounds of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_006, TestSize.Level1) +{ + m_inputIndexs[1] = 6; + + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_007 + * @tc.desc: Verify the param indices out of bounds of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_007, TestSize.Level1) +{ + m_paramIndexs[0] = 6; + + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, 
+ static_cast(&activation), sizeof(int8_t))); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_008 + * @tc.desc: Verify the input indices size is 0 of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_008, TestSize.Level1) +{ + SetIndices(); + + m_inputs.size = 0; + m_inputs.data = nullptr; + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_009 + * @tc.desc: Verify the output indices size is 0 of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_009, TestSize.Level1) +{ + SetIndices(); + + m_outputs.size = 0; + m_outputs.data = nullptr; + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_010 + * @tc.desc: Verify the ops build failed of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_010, TestSize.Level1) +{ + SetIndices(); + + const int32_t dimInput1[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dimInput1, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + const int32_t dimInput2[2] = {2, 2}; + const OH_NN_Tensor& tensor1 = {OH_NN_FLOAT32, 2, dimInput2, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor1)); + const int32_t dimOutput[2] = {2, 2}; + const OH_NN_Tensor& tensor2 = {OH_NN_FLOAT32, 2, dimOutput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor2)); + const OH_NN_Tensor& tensor3 = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_DIV_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor3)); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_specify_inputs_and_outputs_001 + * @tc.desc: Verify the success of the specify_inputs_and_outputs function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_specify_inputs_and_outputs_001, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + + std::vector> inTensors = m_innerModelTest.GetInputTensors(); + EXPECT_EQ(inTensors.size(), m_inputs.size); + std::vector> outTensors = m_innerModelTest.GetOutputTensors(); + EXPECT_EQ(outTensors.size(), m_outputs.size); +} + +/** + * @tc.name: inner_model_specify_inputs_and_outputs_002 + * @tc.desc: Verify the after buildgraph of the specify_inputs_and_outputs function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_specify_inputs_and_outputs_002, TestSize.Level1) +{ + OH_NN_UInt32Array inputs; + OH_NN_UInt32Array outputs; + inputs.data = 
m_inputIndexs; + inputs.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + outputs.data = nullptr; + outputs.size = sizeof(m_outputIndexs) / sizeof(uint32_t); + + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.SpecifyInputsAndOutputs(inputs, outputs)); +} + +/** + * @tc.name: inner_model_specify_inputs_and_outputs_003 + * @tc.desc: Verify the output indices is nullptr but length not 0 of the specify_inputs_and_outputs function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_specify_inputs_and_outputs_003, TestSize.Level1) +{ + SetIndices(); + + m_outputs.data = nullptr; + SetTensors(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_specify_inputs_and_outputs_004 + * @tc.desc: Verify the specift twice of the specify_inputs_and_outputs function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_specify_inputs_and_outputs_004, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_001, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); + EXPECT_EQ(true, m_innerModelTest.IsBuild()); +} + +/** + * @tc.name: inner_model_build_002 + * @tc.desc: Verify the build twice forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_002, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.Build()); +} + +/** + * @tc.name: inner_model_build_003 + * @tc.desc: Verify the params not match optype of the build function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_003, TestSize.Level1) +{ + OH_NN_OperationType m_opType = OH_NN_OPS_DIV; + + SetIndices(); + + const int dim[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + const OH_NN_Tensor& tensorParam = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_DIV_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, 
m_innerModelTest.AddTensor(tensorParam)); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_FAILED, m_innerModelTest.Build()); +} + +/** + * @tc.name: inner_model_build_004 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_004, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); +} + +/** + * @tc.name: inner_model_get_supported_operation_001 + * @tc.desc: Verify the success of the get_supported_operation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_get_supported_operation_001, TestSize.Level1) +{ + const bool *isSupported = nullptr; + uint32_t opCount = 1; + + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + size_t deviceID = 10; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.GetSupportedOperations(deviceID, &isSupported, opCount)); +} + +/** + * @tc.name: inner_model_get_supported_operation_002 + * @tc.desc: Verify the mock hdi device result of the get_supported_operation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_get_supported_operation_002, TestSize.Level1) +{ + size_t deviceID = 10; + const bool *isSupported = nullptr; + uint32_t opCount = 1; + + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, m_innerModelTest.GetSupportedOperations(deviceID, &isSupported, opCount)); +} + +/** + * @tc.name: inner_model_get_supported_operation_003 + * @tc.desc: Verify the mock device manager of the get_supported_operation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_get_supported_operation_003, TestSize.Level1) +{ + const bool *isSupported = nullptr; + uint32_t opCount = 1; + + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + size_t deviceID = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); + EXPECT_EQ(OH_NN_FAILED, m_innerModelTest.GetSupportedOperations(deviceID, &isSupported, 
opCount)); + + std::shared_ptr liteGraph = m_innerModelTest.GetLiteGraphs(); + EXPECT_EQ(liteGraph->name_, "NNR_Model"); +} + +/** + * @tc.name: inner_model_get_supported_operation_004 + * @tc.desc: Verify the before build of the get_supported_operation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_get_supported_operation_004, TestSize.Level1) +{ + size_t deviceID = 10; + const bool *isSupported = nullptr; + uint32_t opCount = 1; + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.GetSupportedOperations(deviceID, &isSupported, opCount)); +} +} // namespace UnitTest +} // namespace NNRT diff --git a/test/unittest/components/inner_model/nn_tensor_test.cpp b/test/unittest/components/inner_model/nn_tensor_test.cpp new file mode 100644 index 0000000..a288c26 --- /dev/null +++ b/test/unittest/components/inner_model/nn_tensor_test.cpp @@ -0,0 +1,525 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "frameworks/native/validation.h" +#include "frameworks/native/nn_tensor.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Validation; + +namespace NNRT { +namespace UnitTest { +class NnTensorTest : public testing::Test { +}; + +/** + * @tc.name: nn_tensor_parse_dimensions_001 + * @tc.desc: Verify the success of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_dimensions_002 + * @tc.desc: Verify the invalid dimensions of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_002, TestSize.Level1) +{ + OH_NN_Tensor tensor; + tensor.dataType = OH_NN_FLOAT32; + tensor.dimensionCount = 2; + tensor.dimensions = nullptr; + tensor.quantParam = nullptr; + tensor.type = OH_NN_TENSOR; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_dimensions_003 + * @tc.desc: Verify the invalid shape tensor of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_003, TestSize.Level1) +{ + const int dim[2] = {2, -2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_dimensions_004 + * @tc.desc: Verify the dynamic shape of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_004, TestSize.Level1) +{ + const int dim[2] = {2, -1}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor 
nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_dimensions_005 + * @tc.desc: Verify the dims out of bounds of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_005, TestSize.Level1) +{ + const int dim[3] = {1000000, 1000000, 10000000}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + + +/** + * @tc.name: nn_tensor_parse_quant_params_001 + * @tc.desc: Verify the success of the parse_quant_params function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_001, TestSize.Level1) +{ + const double scale = 1.0; + const int32_t zeroPoint = 0; + const uint32_t numBits = 8; + const OH_NN_QuantParam quantParam = {1, &numBits, &scale, &zeroPoint}; + + NNTensor nnTensor; + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_quant_params_002 + * @tc.desc: Verify the invalid numbits of the parse_quant_params function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_002, TestSize.Level1) +{ + const double scale = 1.0; + const int32_t zeroPoint = 0; + const uint32_t numBits = 16; + const OH_NN_QuantParam quantParam = {1, &numBits, &scale, &zeroPoint}; + + NNTensor nnTensor; + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_quant_params_004 + * @tc.desc: Verify the invalid scale of the parse_quant_params function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_004, TestSize.Level1) +{ + const int32_t zeroPoint = 0; + const uint32_t numBits = 8; + const OH_NN_QuantParam quantParam = {1, &numBits, nullptr, &zeroPoint}; + + NNTensor nnTensor; + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_quant_params_005 + * @tc.desc: Verify the invalid zeropoint of the parse_quant_params function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_005, TestSize.Level1) +{ + const double scale = 1.0; + const uint32_t numBits = 8; + const OH_NN_QuantParam quantParam = {1, &numBits, &scale, nullptr}; + + NNTensor nnTensor; + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_set_dimensions_001 + * @tc.desc: Verify the success of the set_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_set_dimensions_001, TestSize.Level1) +{ + const int dim[2] = {2, -1}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + const std::vector dimensions = {2, 3}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.SetDimensions(dimensions)); +} + +/** + * @tc.name: nn_tensor_set_dimensions_002 + * @tc.desc: Verify the dim out of bounds of the set_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, 
nn_tensor_set_dimensions_002, TestSize.Level1) +{ + const int dim[2] = {2, -1}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + const std::vector dimensions = {2, 3, 5}; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.SetDimensions(dimensions)); +} + +/** + * @tc.name: nn_tensor_compare_attribute_001 + * @tc.desc: Verify the success of the CompareAttribute function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + NNTensor expectTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + expectTensor = std::move(nnTensor); + EXPECT_EQ(true, nnTensor.CompareAttribute(nnTensor)); +} + +/** + * @tc.name: nn_tensor_compare_attribute_002 + * @tc.desc: Verify the datatype not equal of the CompareAttribute function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_002, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + NNTensor expectTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + const int dimExpect[2] = {2, 2}; + OH_NN_Tensor tensorExpect{OH_NN_INT32, 2, dimExpect, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, expectTensor.BuildFromOHNNTensor(tensorExpect)); + + EXPECT_EQ(false, nnTensor.CompareAttribute(expectTensor)); +} + +/** + * @tc.name: nn_tensor_compare_attribute_003 + * @tc.desc: Verify the dim size not equal of the CompareAttribute function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_003, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + NNTensor expectTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + const int dimExpect[3] = {2, 2, 3}; + OH_NN_Tensor tensorExpect{OH_NN_FLOAT32, 3, dimExpect, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, expectTensor.BuildFromOHNNTensor(tensorExpect)); + + EXPECT_EQ(false, nnTensor.CompareAttribute(expectTensor)); +} + +/** + * @tc.name: nn_tensor_compare_attribute_004 + * @tc.desc: Verify the dim value not equal of the CompareAttribute function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_004, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + NNTensor expectTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + const int dimExpect[2] = {2, 3}; + OH_NN_Tensor tensorExpect{OH_NN_FLOAT32, 2, dimExpect, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, expectTensor.BuildFromOHNNTensor(tensorExpect)); + + EXPECT_EQ(false, nnTensor.CompareAttribute(expectTensor)); +} + +/** + * @tc.name: nn_tensor_is_scalar_001 + * @tc.desc: Verify the success of the is_scalar function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_is_scalar_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + EXPECT_EQ(false, nnTensor.IsScalar()); +} + +/** + * @tc.name: nn_tensor_build_from_tensor_001 + * @tc.desc: Verify the success of the build_from_tensor function + * 
@tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_convert_to_io_tensor_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + int8_t* activationValue = new (std::nothrow) int8_t[1]{0}; + EXPECT_NE(nullptr, activationValue); + + // After SetBuffer, this memory is released by NNTensor + nnTensor.SetBuffer(activationValue, sizeof(int8_t)); + IOTensor ioTensor; + nnTensor.ConvertToIOTensor(ioTensor); + EXPECT_EQ(sizeof(int8_t), ioTensor.length); +} + +/** + * @tc.name: nn_tensor_get_buffer_length_001 + * @tc.desc: Verify the success of the get_buffer_length function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_get_buffer_length_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + int8_t* activationValue = new (std::nothrow) int8_t[1]{0}; + EXPECT_NE(nullptr, activationValue); + + // After SetBuffer, this memory is released by NNTensor + nnTensor.SetBuffer(activationValue, sizeof(int8_t)); + size_t length = sizeof(int8_t); + EXPECT_EQ(length, nnTensor.GetBufferLength()); +} + +/** + * @tc.name: nn_tensor_get_format_001 + * @tc.desc: Verify the success of the get_format function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_get_format_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + OH_NN_Format format = OH_NN_FORMAT_NHWC; + EXPECT_EQ(format, nnTensor.GetFormat()); +} + +/** + * @tc.name: nn_tensor_get_name_001 + * @tc.desc: Verify the success of the get name function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_get_name_001, TestSize.Level1) +{ + NNTensor nnTensor; + const std::string& name = "test"; + nnTensor.SetName(name); + EXPECT_EQ(name, nnTensor.GetName()); +} + +/** + * @tc.name: nn_tensor_get_quant_param_001 + * @tc.desc: Verify the success of the get_quant_param function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_get_quant_param_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + std::vector quantParam = nnTensor.GetQuantParam(); + size_t quantSize = 0; + EXPECT_EQ(quantSize, quantParam.size()); +} + +/** + * @tc.name: nn_tensor_build_from_tensor_002 + * @tc.desc: Verify the invalid datatype value of the build_from_tensor function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_from_tensor_002, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + + int dataTypeTest = 13; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + OH_NN_Tensor tensor{dataType, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_convert_to_lite_graph_tensor_001 + * @tc.desc: Verify the success of the convert_to_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_convert_to_lite_graph_tensor_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + 
EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + LiteGraphTensorPtr tensorPtr = {nullptr, DestroyLiteGraphTensor}; + EXPECT_NE(tensorPtr, nnTensor.ConvertToLiteGraphTensor()); +} + +/** + * @tc.name: nn_tensor_convert_to_lite_graph_tensor_002 + * @tc.desc: Verify the success with quant of the convert_to_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_convert_to_lite_graph_tensor_002, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + + OH_NN_Tensor tensor; + tensor.dataType = OH_NN_FLOAT32; + tensor.dimensionCount = 2; + tensor.dimensions = dim; + const double scale = 1.0; + const int32_t zeroPoint = 0; + const uint32_t numBits = 8; + const OH_NN_QuantParam quantParam = {1, &numBits, &scale, &zeroPoint}; + tensor.quantParam = &quantParam; + tensor.type = OH_NN_TENSOR; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + LiteGraphTensorPtr tensorPtr = {nullptr, DestroyLiteGraphTensor}; + EXPECT_NE(tensorPtr, nnTensor.ConvertToLiteGraphTensor()); +} + +/** + * @tc.name: nn_tensor_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_001, TestSize.Level1) +{ + OH_NN_DataType dataType = OH_NN_FLOAT32; + const std::vector dimensions = {2, 2}; + const std::vector quantParam = {{8, 1.0, 0}, {8, 1.0, 0}, {8, 1.0, 0}}; + OH_NN_TensorType type = OH_NN_ADD_ACTIVATIONTYPE; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.Build(dataType, dimensions, quantParam, type)); +} + +/** + * @tc.name: nn_tensor_build_002 + * @tc.desc: Verify the invalid datatype value of the build function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_002, TestSize.Level1) +{ + int dataTypeTest = 13; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + const std::vector dimensions = {2, 2}; + const std::vector quantParam = {{8, 1.0, 0}, {8, 1.0, 0}, {8, 1.0, 0}}; + OH_NN_TensorType type = OH_NN_ADD_ACTIVATIONTYPE; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.Build(dataType, dimensions, quantParam, type)); +} + +/** + * @tc.name: nn_tensor_build_003 + * @tc.desc: Verify the dynamic shape of the build function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_003, TestSize.Level1) +{ + OH_NN_DataType dataType = OH_NN_FLOAT32; + const std::vector dimensions = {2, -2}; + const std::vector quantParam = {{8, 1.0, 0}, {8, 1.0, 0}, {8, 1.0, 0}}; + OH_NN_TensorType type = OH_NN_ADD_ACTIVATIONTYPE; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.Build(dataType, dimensions, quantParam, type)); +} + +/** + * @tc.name: nn_tensor_build_004 + * @tc.desc: Verify the invalid numbits of the build function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_004, TestSize.Level1) +{ + OH_NN_DataType dataType = OH_NN_FLOAT32; + const std::vector dimensions = {2, 2}; + const std::vector quantParam = {{2, 1.0, 0}, {2, 1.0, 0}, {2, 1.0, 0}}; + OH_NN_TensorType type = OH_NN_ADD_ACTIVATIONTYPE; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.Build(dataType, dimensions, quantParam, type)); +} +} // namespace UnitTest +} // namespace NNRT diff --git a/test/unittest/components/inner_model/nn_validation_test.cpp b/test/unittest/components/inner_model/nn_validation_test.cpp new file mode 100644 index 0000000..49a2e81 --- /dev/null +++ b/test/unittest/components/inner_model/nn_validation_test.cpp @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "frameworks/native/validation.h" +#include "frameworks/native/nn_tensor.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Validation; + +namespace NNRT { +namespace UnitTest { +class NnValidationTest : public testing::Test { +}; + +/** + * @tc.name: nn_validation_validate_tensor_datatype_001 + * @tc.desc: Verify the success of the validate_tensor_datatype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_tensor_datatype_001, TestSize.Level1) +{ + int dataTypeTest = 12; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + EXPECT_EQ(true, ValidateTensorDataType(dataType)); +} + +/** + * @tc.name: nn_validation_validate_tensor_datatype_002 + * @tc.desc: Verify the gt bounds of the validate_tensor_datatype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_tensor_datatype_002, TestSize.Level1) +{ + int dataTypeTest = 13; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + EXPECT_EQ(false, ValidateTensorDataType(dataType)); +} + +/** + * @tc.name: nn_validation_validate_tensor_datatype_003 + * @tc.desc: Verify the lt bounds of the validate_tensor_datatype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_tensor_datatype_003, TestSize.Level1) +{ + int dataTypeTest = -1; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + EXPECT_EQ(false, ValidateTensorDataType(dataType)); +} + +/** + * @tc.name: nn_validation_validate_preformance_mode_001 + * @tc.desc: Verify the success of the validate_preformance_mode function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_preformance_mode_001, TestSize.Level1) +{ + int performanceModeTest = 4; + OH_NN_PerformanceMode performanceMode = (OH_NN_PerformanceMode)performanceModeTest; + EXPECT_EQ(true, ValidatePerformanceMode(performanceMode)); +} + +/** + * @tc.name: nn_validation_validate_preformance_mode_002 + * @tc.desc: Verify the gt bounds of the validate_preformance_mode function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_preformance_mode_002, TestSize.Level1) +{ + int performanceModeTest = 5; + OH_NN_PerformanceMode performanceMode = (OH_NN_PerformanceMode)performanceModeTest; + EXPECT_EQ(false, ValidatePerformanceMode(performanceMode)); +} + +/** + * @tc.name: nn_validation_validate_preformance_mode_003 + * @tc.desc: Verify the lt bounds of the validate_preformance_mode function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_preformance_mode_003, TestSize.Level1) +{ + int performanceModeTest = -1; + OH_NN_PerformanceMode performanceMode = (OH_NN_PerformanceMode)performanceModeTest; + EXPECT_EQ(false, ValidatePerformanceMode(performanceMode)); +} + +/** + * @tc.name: nn_validation_validate_priority_001 + * @tc.desc: Verify the success of the validate_priority function + 
* @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_priority_001, TestSize.Level1) +{ + int priorityTest = 2; + OH_NN_Priority priority = (OH_NN_Priority)priorityTest; + EXPECT_EQ(true, ValidatePriority(priority)); +} + +/** + * @tc.name: nn_validation_validate_priority_002 + * @tc.desc: Verify the gt bounds of the validate_priority function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_priority_002, TestSize.Level1) +{ + int priorityTest = 4; + OH_NN_Priority priority = (OH_NN_Priority)priorityTest; + EXPECT_EQ(false, ValidatePriority(priority)); +} + +/** + * @tc.name: nn_validation_validate_priority_003 + * @tc.desc: Verify the lt bounds of the validate_priority function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_priority_003, TestSize.Level1) +{ + int priorityTest = -1; + OH_NN_Priority priority = (OH_NN_Priority)priorityTest; + EXPECT_EQ(false, ValidatePriority(priority)); +} + +/** + * @tc.name: nn_validation_fusetype_001 + * @tc.desc: Verify the success of the validate_fusetype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_fusetype_001, TestSize.Level1) +{ + int fuseTypeTest = 2; + OH_NN_FuseType fuseType = (OH_NN_FuseType)fuseTypeTest; + EXPECT_EQ(true, ValidateFuseType(fuseType)); +} + +/** + * @tc.name: nn_validation_fusetype_002 + * @tc.desc: Verify the gt bounds of the validate_fusetype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_fusetype_002, TestSize.Level1) +{ + int fuseTypeTest = 3; + OH_NN_FuseType fuseType = (OH_NN_FuseType)fuseTypeTest; + EXPECT_EQ(false, ValidateFuseType(fuseType)); +} + +/** + * @tc.name: nn_validation_fusetype_003 + * @tc.desc: Verify the lt bounds of the validate_fusetype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_fusetype_003, TestSize.Level1) +{ + int fuseTypeTest = -1; + OH_NN_FuseType fuseType = (OH_NN_FuseType)fuseTypeTest; + EXPECT_EQ(false, ValidateFuseType(fuseType)); +} +} // namespace UnitTest +} // namespace NNRT diff --git a/test/unittest/components/inner_model/ops_regitstry_test.cpp b/test/unittest/components/inner_model/ops_regitstry_test.cpp new file mode 100644 index 0000000..de3cc84 --- /dev/null +++ b/test/unittest/components/inner_model/ops_regitstry_test.cpp @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include "frameworks/native/validation.h" +#include "frameworks/native/ops_registry.h" +#include "frameworks/native/ops/add_builder.h" +#include "frameworks/native/ops/div_builder.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Validation; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace NNRT { +namespace UnitTest { +class OpsRegistryTest : public testing::Test { +}; + +/** + * @tc.name: registry_001 + * @tc.desc: Verify the registry success the registar function + * @tc.type: FUNC + */ +HWTEST_F(OpsRegistryTest, registry_001, TestSize.Level1) +{ + const int newRegistryOperationType = 100; + REGISTER_OPS(AddBuilder, OH_NN_OperationType(newRegistryOperationType)); + + OpsRegistry& opsregistry = OpsRegistry::GetSingleton(); + EXPECT_NE(nullptr, opsregistry.GetOpsBuilder(OH_NN_OperationType(newRegistryOperationType))); +} + +/** + * @tc.name: registry_002 + * @tc.desc: Verify the registry twice the registar function + * @tc.type: FUNC + */ +HWTEST_F(OpsRegistryTest, registry_002, TestSize.Level1) +{ + const int newRegistryOperationType = 1000; + REGISTER_OPS(AddBuilder, OH_NN_OperationType(newRegistryOperationType)); + + OpsRegistry& opsregistry = OpsRegistry::GetSingleton(); + EXPECT_NE(nullptr, opsregistry.GetOpsBuilder(OH_NN_OperationType(newRegistryOperationType))); + + REGISTER_OPS(DivBuilder, OH_NN_OperationType(newRegistryOperationType)); +} +} // namespace UnitTest +} // namespace NNRT diff --git a/test/unittest/components/memory_manager/memory_manager_test.cpp b/test/unittest/components/memory_manager/memory_manager_test.cpp new file mode 100644 index 0000000..eba193d --- /dev/null +++ b/test/unittest/components/memory_manager/memory_manager_test.cpp @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include +#include +#include +#include + +#include + +#include "interfaces/oem/cpp_api/cpp_type.h" +#include "frameworks/native/memory_manager.h" +#include "test/unittest/common/file_utils.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class MemoryManagerTest : public testing::Test { +public: + MemoryManagerTest() = default; + ~MemoryManagerTest() = default; +}; + +/** + * @tc.name: memorymanagertest_mapmemory_001 + * @tc.desc: Verify the MapMemory function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(MemoryManagerTest, memorymanagertest_mapmemory_001, TestSize.Level0) +{ + const auto& memoryManager = MemoryManager::GetInstance(); + int fd = -1; + size_t length = 0; + void* result = memoryManager->MapMemory(fd, length); + EXPECT_EQ(nullptr, result); +} + +/** + * @tc.name: memorymanagertest_mapmemory_002 + * @tc.desc: Verify the MapMemory function return nullptr in case of length 0. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(MemoryManagerTest, memorymanagertest_mapmemory_002, TestSize.Level0)
+{
+    const auto& memoryManager = MemoryManager::GetInstance();
+    int fd = 0;
+    size_t length = 0;
+    void* result = memoryManager->MapMemory(fd, length);
+    EXPECT_EQ(nullptr, result);
+}
+
+/**
+ * @tc.name: memorymanagertest_mapmemory_003
+ * @tc.desc: Verify the MapMemory function returns nullptr in case of fd 0.
+ * @tc.type: FUNC
+ */
+HWTEST_F(MemoryManagerTest, memorymanagertest_mapmemory_003, TestSize.Level0)
+{
+    const auto& memoryManager = MemoryManager::GetInstance();
+    int fd = 0;
+    size_t length = 1;
+    void* result = memoryManager->MapMemory(fd, length);
+    EXPECT_EQ(nullptr, result);
+}
+
+/**
+ * @tc.name: memorymanagertest_mapmemory_004
+ * @tc.desc: Verify the MapMemory function validates the mapped memory content successfully.
+ * @tc.type: FUNC
+ */
+HWTEST_F(MemoryManagerTest, memorymanagertest_mapmemory_004, TestSize.Level0)
+{
+    std::string data = "ABCD";
+    const size_t dataLength = 100;
+    data.resize(dataLength, '*');
+
+    std::string filename = "/data/log/memory-001.dat";
+    FileUtils fileUtils(filename);
+    fileUtils.WriteFile(data);
+
+    int fd = open(filename.c_str(), O_RDWR);
+    EXPECT_NE(-1, fd);
+
+    size_t length = 4;
+    const auto& memoryManager = MemoryManager::GetInstance();
+    char* result = static_cast<char*>(memoryManager->MapMemory(fd, length));
+    EXPECT_NE(nullptr, result);
+    EXPECT_EQ('A', static_cast<char>(result[0]));
+    EXPECT_EQ('B', static_cast<char>(result[1]));
+    EXPECT_EQ('C', static_cast<char>(result[2]));
+    EXPECT_EQ('D', static_cast<char>(result[3]));
+    memoryManager->UnMapMemory(result);
+    close(fd);
+}
+
+/**
+ * @tc.name: memorymanagertest_unmapmemory_001
+ * @tc.desc: Verify the UnMapMemory function behavior with a nullptr buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(MemoryManagerTest, memorymanagertest_unmapmemory_001, TestSize.Level0)
+{
+    const auto& memoryManager = MemoryManager::GetInstance();
+    void* memory = nullptr;
+    memoryManager->UnMapMemory(memory);
+}
+
+/**
+ * @tc.name: memorymanagertest_unmapmemory_002
+ * @tc.desc: Verify the UnMapMemory function behavior with a buffer that was never mapped.
+ * @tc.type: FUNC
+ */
+HWTEST_F(MemoryManagerTest, memorymanagertest_unmapmemory_002, TestSize.Level0)
+{
+    const auto& memoryManager = MemoryManager::GetInstance();
+    void* memory = malloc(10);
+    memoryManager->UnMapMemory(memory);
+    free(memory);
+}
+
+/**
+ * @tc.name: memorymanagertest_unmapmemory_003
+ * @tc.desc: Verify the UnMapMemory function pairwise behavior.
+ * @tc.type: FUNC
+ */
+HWTEST_F(MemoryManagerTest, memorymanagertest_unmapmemory_003, TestSize.Level0)
+{
+    std::string data = "ABCD";
+    const size_t dataLength = 100;
+    data.resize(dataLength, '/');
+
+    std::string filename = "/data/log/memory-001.dat";
+    FileUtils fileUtils(filename);
+    fileUtils.WriteFile(data);
+
+    int fd = 0;
+    fd = open(filename.c_str(), O_RDWR);
+    EXPECT_NE(-1, fd);
+
+    size_t length = 10;
+    const auto& memoryManager = MemoryManager::GetInstance();
+    void* buffer = memoryManager->MapMemory(fd, length);
+    memoryManager->UnMapMemory(buffer);
+    close(fd);
+}
+
+/**
+ * @tc.name: memorymanagertest_getmemory_001
+ * @tc.desc: Verify the GetMemory function returns OH_NN_NULL_PTR for a nullptr buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(MemoryManagerTest, memorymanagertest_getmemory_001, TestSize.Level0)
+{
+    const auto& memoryManager = MemoryManager::GetInstance();
+    void* buffer = nullptr;
+    Memory memory;
+    OH_NN_ReturnCode result = memoryManager->GetMemory(buffer, memory);
+    EXPECT_EQ(OH_NN_NULL_PTR, result);
+}
+
+/**
+ * @tc.name: memorymanagertest_getmemory_002
+ * @tc.desc: Verify the GetMemory function returns OH_NN_INVALID_PARAMETER for an unmapped buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(MemoryManagerTest, memorymanagertest_getmemory_002, TestSize.Level0)
+{
+    const auto& memoryManager = MemoryManager::GetInstance();
+    void* buffer = malloc(10);
+    Memory memory;
+    OH_NN_ReturnCode result = memoryManager->GetMemory(buffer, memory);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+    free(buffer);
+}
+
+/**
+ * @tc.name: memorymanagertest_getmemory_003
+ * @tc.desc: Verify the GetMemory function validates the memory content successfully.
+ * @tc.type: FUNC
+ */
+HWTEST_F(MemoryManagerTest, memorymanagertest_getmemory_003, TestSize.Level0)
+{
+    std::string data = "ABCD";
+    const size_t dataLength = 100;
+    data.resize(dataLength, '%');
+
+    std::string filename = "/data/log/memory-001.dat";
+    FileUtils fileUtils(filename);
+    fileUtils.WriteFile(data);
+
+    int fd = 0;
+    fd = open(filename.c_str(), O_RDWR);
+    EXPECT_NE(-1, fd);
+
+    size_t length = 4;
+    const auto& memoryManager = MemoryManager::GetInstance();
+    void* buffer = memoryManager->MapMemory(fd, length);
+    close(fd);
+
+    Memory memory;
+    OH_NN_ReturnCode result = memoryManager->GetMemory(buffer, memory);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+    EXPECT_NE(nullptr, memory.data);
+
+    const char* tmpData = static_cast<const char*>(memory.data);
+    EXPECT_EQ('A', static_cast<char>(tmpData[0]));
+    EXPECT_EQ('B', static_cast<char>(tmpData[1]));
+    EXPECT_EQ('C', static_cast<char>(tmpData[2]));
+    EXPECT_EQ('D', static_cast<char>(tmpData[3]));
+    memoryManager->UnMapMemory(buffer);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.cpp b/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.cpp
new file mode 100644
index 0000000..404f2e8
--- /dev/null
+++ b/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.cpp
@@ -0,0 +1,2221 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "neural_network_runtime_test.h"
+
+#include "mindir.h"
+
+#include "common/utils.h"
+#include "frameworks/native/compilation.h"
+#include "frameworks/native/device_manager.h"
+#include "frameworks/native/hdi_device.h"
+#include "test/unittest/common/mock_idevice.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr<const mindspore::lite::LiteGraph> model,
+    const ModelConfig& config,
+    std::shared_ptr<PreparedModel>& preparedModel)
+{
+    if (model == nullptr) {
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (config.enableFloat16 == false) {
+        return OH_NN_FAILED;
+    }
+
+    sptr<OHOS::HDI::Nnrt::V1_0::IPreparedModel> iPreparedModel =
+        sptr<OHOS::HDI::Nnrt::V1_0::IPreparedModel>(new OHOS::HDI::Nnrt::V1_0::MockIPreparedModel());
+    if (iPreparedModel == nullptr) {
+        LOGE("HDIDevice mock PrepareModel failed, error happened when new sptr");
+        return OH_NN_NULL_PTR;
+    }
+
+    preparedModel = CreateSharedPtr<HDIPreparedModel>(iPreparedModel);
+    return OH_NN_SUCCESS;
+}
+
+std::shared_ptr<Device> DeviceManager::GetDevice(size_t deviceId) const
+{
+    sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice> idevice
+        = sptr<OHOS::HDI::Nnrt::V1_0::INnrtDevice>(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice());
+    if (idevice == nullptr) {
+        LOGE("DeviceManager mock GetDevice failed, error happened when new sptr");
+        return nullptr;
+    }
+
+    std::shared_ptr<Device> device = CreateSharedPtr<HDIDevice>(idevice);
+    if (device == nullptr) {
+        LOGE("DeviceManager mock GetDevice failed, the device is nullptr");
+        return nullptr;
+    }
+
+    if (deviceId == 0) {
+        LOGE("DeviceManager mock GetDevice failed, the passed parameter deviceId is 0");
+        return nullptr;
+    } else {
+        return device;
+    }
+}
+
+OH_NN_ReturnCode HDIDevice::GetDeviceType(OH_NN_DeviceType& deviceType)
+{
+    if (deviceType == OH_NN_OTHERS) {
+        return OH_NN_UNAVALIDABLE_DEVICE;
+    }
+
+    return OH_NN_SUCCESS;
+}
+
+const std::string& DeviceManager::GetDeviceName(size_t deviceId)
+{
+    static std::string deviceName = "";
+    if (deviceId == 0) {
+        return deviceName;
+    }
+
+    deviceName = "deviceId";
+    return deviceName;
+}
+
+const std::vector<size_t>& DeviceManager::GetAllDeviceId()
+{
+    static std::vector<size_t> deviceIds;
+    if (OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) {
+        // In order not to affect other use cases, set to the OH_NN_OPERATION_FORBIDDEN
+        OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN;
+        return deviceIds;
+    }
+    std::size_t device = 1;
+    deviceIds.emplace_back(device);
+    return deviceIds;
+}
+
+OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported)
+{
+    isSupported = true;
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode HDIDevice::IsPerformanceModeSupported(bool& isSupported)
+{
+    isSupported = true;
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode HDIDevice::IsPrioritySupported(bool& isSupported)
+{
+    isSupported = true;
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode HDIDevice::IsFloat16PrecisionSupported(bool& isSupported)
+{
+    isSupported = true;
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr<const mindspore::lite::LiteGraph> model,
+    std::vector<bool>& ops)
+{
+    if (model == nullptr) {
+        LOGE("HDIDevice mock GetSupportedOperation failed, Model is nullptr, cannot query supported operation.");
+        return OH_NN_NULL_PTR;
+    }
+
+    ops.emplace_back(true);
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode HDIDevice::IsDynamicInputSupported(bool& isSupported)
+{
+    isSupported = true;
+    return OH_NN_SUCCESS;
+}
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace Unittest {
+OH_NN_ReturnCode NeuralNetworkRuntimeTest::BuildModelGraph(InnerModel& innerModel)
+{
+    // liteGraph is released internally by innerModel
+    mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph;
+    EXPECT_NE(nullptr, liteGraph);
+
+    liteGraph->name_ = "testGraph";
+    liteGraph->input_indices_ = {0};
+    liteGraph->output_indices_ = {1};
+    liteGraph->all_tensors_ = {nullptr};
+    const std::vector<uint8_t> data(36, 1);
+    const std::vector<int32_t> dim = {3, 3};
+    const std::vector<mindspore::lite::QuantParam> quant_params {};
+
+    for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) {
+        liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_,
+            mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params));
+    }
+
+    for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) {
+        liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_,
+            mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params));
+    }
+
+    return innerModel.BuildFromLiteGraph(liteGraph);
+}
+
+void NeuralNetworkRuntimeTest::InitIndices()
+{
+    m_inputIndices.data = m_inputIndexs;
+    m_inputIndices.size = sizeof(m_inputIndexs) / sizeof(uint32_t);
+
+    m_outputIndices.data = m_outputIndexs;
+    m_outputIndices.size = sizeof(m_outputIndexs) / sizeof(uint32_t);
+
+    m_paramIndices.data = m_paramIndexs;
+    m_paramIndices.size = sizeof(m_paramIndexs) / sizeof(uint32_t);
+}
+
+void NeuralNetworkRuntimeTest::AddModelTensor(InnerModel& innerModel)
+{
+    const int dim[2] = {2, 2};
+    const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR};
+
+    EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddTensor(tensor));
+    EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddTensor(tensor));
+    EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddTensor(tensor));
+
+    const OH_NN_Tensor& tensorParam = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_ADD_ACTIVATIONTYPE};
+    EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddTensor(tensorParam));
+}
+
+void NeuralNetworkRuntimeTest::SetTensor()
+{
+    m_tensor.dataType = OH_NN_INT32;
+    m_tensor.dimensionCount = 0;
+    m_tensor.dimensions = nullptr;
+    m_tensor.quantParam = nullptr;
+    m_tensor.type = OH_NN_TENSOR;
+}
+
+void NeuralNetworkRuntimeTest::SetInnerBuild(InnerModel& innerModel)
+{
+    uint32_t index = 3;
+    const int8_t activation = 0;
+    EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index,
+        static_cast<const void*>(&activation), sizeof(int8_t)));
+
+    OH_NN_OperationType opType{OH_NN_OPS_ADD};
+    EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddOperation(opType, m_paramIndices, m_inputIndices, m_outputIndices));
+    EXPECT_EQ(OH_NN_SUCCESS, innerModel.SpecifyInputsAndOutputs(m_inputIndices, m_outputIndices));
+    EXPECT_EQ(OH_NN_SUCCESS, innerModel.Build());
+}
+
+void NeuralNetworkRuntimeTest::SetInputAndOutput(Executor& executor)
+{
+    size_t length = 9 * sizeof(int32_t);
+    float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void *buffer = input;
+    uint32_t index = 0;
+
+    SetTensor();
+    EXPECT_EQ(OH_NN_SUCCESS, executor.SetInput(index, m_tensor, buffer, length));
+    EXPECT_EQ(OH_NN_SUCCESS, executor.SetOutput(index, buffer, length));
+    EXPECT_EQ(OH_NN_SUCCESS, executor.Run());
+}
+
+/*
+ * @tc.name: model_construct_001
+ * @tc.desc: Verify the return model of the OH_NNModel_Construct function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, model_construct_001, testing::ext::TestSize.Level0)
+{
+    OH_NNModel* ret = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, ret);
+}
+
+/*
+ * @tc.name: model_add_tensor_001
+ * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_AddTensor function.
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_tensor_001, testing::ext::TestSize.Level0) +{ + OH_NNModel* model = nullptr; + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, &tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_tensor_002 + * @tc.desc: Verify the OH_NN_Tensor is nullptr of the OH_NNModel_AddTensor function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_tensor_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_Tensor* tensor = nullptr; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_tensor_003 + * @tc.desc: Verify the success of the OH_NNModel_AddTensor function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_tensor_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, &tensor); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_add_operation_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_AddOperation function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + InitIndices(); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, &m_paramIndices, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_operation_002 + * @tc.desc: Verify the paramIndices is nullptr of the OH_NNModel_AddOperation function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + m_inputIndices.data = m_inputIndexs; + m_inputIndices.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + + m_outputIndices.data = m_outputIndexs; + m_outputIndices.size = sizeof(m_outputIndexs) / sizeof(uint32_t); + + AddModelTensor(innerModel); + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, nullptr, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_operation_003 + * @tc.desc: Verify the inputIndices is nullptr of the OH_NNModel_AddOperation function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + m_paramIndices.data = m_paramIndexs; + m_paramIndices.size = sizeof(m_paramIndexs) / sizeof(uint32_t); + + m_outputIndices.data = m_outputIndexs; + m_outputIndices.size = sizeof(m_outputIndexs) / sizeof(uint32_t); + + AddModelTensor(innerModel); + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, &m_paramIndices, nullptr, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_operation_004 + * @tc.desc: Verify the outputIndices is nullptr of the OH_NNModel_AddOperation function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + m_paramIndices.data = m_paramIndexs; + m_paramIndices.size = sizeof(m_paramIndexs) / sizeof(uint32_t); + + m_inputIndices.data = m_inputIndexs; + m_inputIndices.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + + AddModelTensor(innerModel); + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, &m_paramIndices, &m_inputIndices, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_operation_005 + * @tc.desc: Verify the success of the OH_NNModel_AddOperation function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + InitIndices(); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, &m_paramIndices, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_set_tensor_data_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_SetTensorData function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_set_tensor_data_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + + OH_NN_ReturnCode ret = OH_NNModel_SetTensorData(model, index, static_cast(&activation), + sizeof(int8_t)); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_set_tensor_data_002 + * @tc.desc: Verify the data is nullptr of the OH_NNModel_SetTensorData function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_set_tensor_data_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + AddModelTensor(innerModel); + + uint32_t index = 3; + + OH_NN_ReturnCode ret = OH_NNModel_SetTensorData(model, index, nullptr, sizeof(int8_t)); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_set_tensor_data_003 + * @tc.desc: Verify the length is 0 of the OH_NNModel_SetTensorData function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_set_tensor_data_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + + OH_NN_ReturnCode ret = OH_NNModel_SetTensorData(model, index, static_cast(&activation), 0); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_set_tensor_data_004 + * @tc.desc: Verify the successs of the OH_NNModel_SetTensorData function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_set_tensor_data_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + + OH_NN_ReturnCode ret = OH_NNModel_SetTensorData(model, index, static_cast(&activation), + sizeof(int8_t)); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_specify_inputs_and_outputs_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_SpecifyInputsAndOutputs function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_specify_inputs_and_outputs_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + + InitIndices(); + AddModelTensor(innerModel); + + OH_NN_ReturnCode ret = OH_NNModel_SpecifyInputsAndOutputs(model, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_specify_inputs_and_outputs_002 + * @tc.desc: Verify the inputIndices is nullptr of the OH_NNModel_SpecifyInputsAndOutputs function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_specify_inputs_and_outputs_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + InitIndices(); + AddModelTensor(innerModel); + + OH_NN_ReturnCode ret = OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_specify_inputs_and_outputs_003 + * @tc.desc: Verify the outputIndices is nullptr of the OH_NNModel_SpecifyInputsAndOutputs function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_specify_inputs_and_outputs_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + InitIndices(); + AddModelTensor(innerModel); + + OH_NN_ReturnCode ret = OH_NNModel_SpecifyInputsAndOutputs(model, &m_inputIndices, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_specify_inputs_and_outputs_004 + * @tc.desc: Verify the success of the OH_NNModel_SpecifyInputsAndOutputs function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_specify_inputs_and_outputs_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + InitIndices(); + AddModelTensor(innerModel); + + OH_NN_ReturnCode ret = OH_NNModel_SpecifyInputsAndOutputs(model, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_finish_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_Finish function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_finish_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + InitIndices(); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, static_cast(&activation), + sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddOperation(opType, m_paramIndices, m_inputIndices, m_outputIndices)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SpecifyInputsAndOutputs(m_inputIndices, m_outputIndices)); + + OH_NN_ReturnCode ret = OH_NNModel_Finish(model); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_finish_002 + * @tc.desc: Verify the success of the OH_NNModel_Finish function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_finish_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + OH_NN_OperationType opType{OH_NN_OPS_ADD}; + + InitIndices(); + AddModelTensor(innerModel); + + const int8_t activation = 0; + uint32_t index = 3; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddOperation(opType, m_paramIndices, m_inputIndices, m_outputIndices)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SpecifyInputsAndOutputs(m_inputIndices, m_outputIndices)); + + OH_NN_ReturnCode ret = OH_NNModel_Finish(model); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_destroy_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_destroy_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel** pModel = nullptr; + OH_NNModel_Destroy(pModel); + EXPECT_EQ(nullptr, pModel); +} + +/* + * @tc.name: model_destroy_002 + * @tc.desc: Verify the *OH_NNModel is nullptr of the OH_NNModel_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_destroy_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + OH_NNModel** pModel = &model; + OH_NNModel_Destroy(pModel); + EXPECT_EQ(nullptr, model); +} + +/* + * @tc.name: model_destroy_003 + * @tc.desc: Verify the normal model of the OH_NNModel_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_destroy_003, testing::ext::TestSize.Level0) +{ + InnerModel* innerModel = new InnerModel(); + EXPECT_NE(nullptr, innerModel); + OH_NNModel* model = reinterpret_cast(innerModel); + OH_NNModel_Destroy(&model); + EXPECT_EQ(nullptr, model); +} + +/* + * @tc.name: model_get_available_operation_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_GetAvailableOperations function. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_001, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    OH_NNModel* model = nullptr;
+
+    uint32_t opCount = 1;
+    const bool *pIsAvailable = nullptr;
+
+    InitIndices();
+    AddModelTensor(innerModel);
+    SetInnerBuild(innerModel);
+
+    size_t deviceID = 10;
+    OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, &pIsAvailable, &opCount);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: model_get_available_operation_002
+ * @tc.desc: Verify the isAvailable is nullptr of the OH_NNModel_GetAvailableOperations function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
+
+    uint32_t opCount = 1;
+    InitIndices();
+    AddModelTensor(innerModel);
+    SetInnerBuild(innerModel);
+
+    size_t deviceID = 10;
+    OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, nullptr, &opCount);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: model_get_available_operation_003
+ * @tc.desc: Verify the *isAvailable is not nullptr of the OH_NNModel_GetAvailableOperations function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_003, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
+
+    const bool isAvailable = true;
+    const bool *pIsAvailable = &isAvailable;
+    uint32_t opCount = 1;
+
+    InitIndices();
+    AddModelTensor(innerModel);
+    SetInnerBuild(innerModel);
+
+    size_t deviceID = 10;
+    OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, &pIsAvailable, &opCount);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: model_get_available_operation_004
+ * @tc.desc: Verify the opCount is nullptr of the OH_NNModel_GetAvailableOperations function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_004, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
+
+    const bool *pIsAvailable = nullptr;
+    uint32_t* opCount = nullptr;
+
+    InitIndices();
+    AddModelTensor(innerModel);
+    SetInnerBuild(innerModel);
+
+    size_t deviceID = 10;
+    OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, &pIsAvailable, opCount);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: model_get_available_operation_005
+ * @tc.desc: Verify the success of the OH_NNModel_GetAvailableOperations function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_005, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
+
+    const bool *pIsAvailable = nullptr;
+    uint32_t opCount = 1;
+
+    InitIndices();
+    AddModelTensor(innerModel);
+    SetInnerBuild(innerModel);
+
+    size_t deviceID = 10;
+    OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, &pIsAvailable, &opCount);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/*
+ * @tc.name: compilation_construct_001
+ * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNCompilation_Construct function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, compilation_construct_001, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    const OH_NNModel* model = nullptr;
+    OH_NNCompilation* ret = OH_NNCompilation_Construct(model);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/*
+ * @tc.name: compilation_construct_002
+ * @tc.desc: Verify the OH_NNCompilation_Construct function when OH_NNModel_Build has not been called before creating the compilation.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, compilation_construct_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
+    OH_NNCompilation* ret = OH_NNCompilation_Construct(model);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/*
+ * @tc.name: compilation_construct_003
+ * @tc.desc: Verify the normal model of the OH_NNCompilation_Construct function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, compilation_construct_003, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
+    OH_NNCompilation* ret = OH_NNCompilation_Construct(model);
+    EXPECT_NE(nullptr, ret);
+}
+
+/*
+ * @tc.name: compilation_set_device_001
+ * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_SetDevice function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_device_001, testing::ext::TestSize.Level0)
+{
+    OH_NNCompilation* compilation = nullptr;
+    size_t deviceId = 1;
+    OH_NN_ReturnCode ret = OH_NNCompilation_SetDevice(compilation, deviceId);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: compilation_set_device_002
+ * @tc.desc: Verify the success of the OH_NNCompilation_SetDevice function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_device_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation compilation(&innerModel);
+    OH_NNCompilation* nnCompilation = reinterpret_cast<OH_NNCompilation*>(&compilation);
+    size_t deviceId = 1;
+    OH_NN_ReturnCode ret = OH_NNCompilation_SetDevice(nnCompilation, deviceId);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/*
+ * @tc.name: compilation_set_cache_001
+ * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_SetCache function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_cache_001, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation compilation(&innerModel);
+    OH_NNCompilation* nnCompilation = nullptr;
+    const char* cacheDir = "../";
+    uint32_t version = 1;
+    std::size_t deviceId = 1;
+    EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId));
+    OH_NN_ReturnCode ret = OH_NNCompilation_SetCache(nnCompilation, cacheDir, version);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: compilation_set_cache_002
+ * @tc.desc: Verify the cachePath is nullptr of the OH_NNCompilation_SetCache function.
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_cache_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + const char* cacheDir = nullptr; + uint32_t version = 1; + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + OH_NN_ReturnCode ret = OH_NNCompilation_SetCache(nnCompilation, cacheDir, version); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_cache_003 + * @tc.desc: Verify the success of the OH_NNCompilation_SetCache function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_cache_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + const char* cacheDir = "../"; + uint32_t version = 1; + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + OH_NN_ReturnCode ret = OH_NNCompilation_SetCache(nnCompilation, cacheDir, version); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_performance_mode_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_SetPerformanceMode function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_performance_mode_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + OH_NN_PerformanceMode performanceMode = OH_NN_PERFORMANCE_NONE; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + OH_NN_ReturnCode ret = OH_NNCompilation_SetPerformanceMode(nnCompilation, performanceMode); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_performance_mode_002 + * @tc.desc: Verify the success of the OH_NNCompilation_SetPerformanceMode function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_performance_mode_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + OH_NN_PerformanceMode performanceMode = OH_NN_PERFORMANCE_NONE; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_SetPerformanceMode(nnCompilation, performanceMode); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_priority_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_SetPriority function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_priority_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + OH_NN_Priority priority = OH_NN_PRIORITY_LOW; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_SetPriority(nnCompilation, priority); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_priority_002 + * @tc.desc: Verify the success of the OH_NNCompilation_SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_priority_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + OH_NN_Priority priority = OH_NN_PRIORITY_LOW; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_SetPriority(nnCompilation, priority); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_enable_float16_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_EnableFloat16 function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_enable_float16_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + bool enableFloat16 = true; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_EnableFloat16(nnCompilation, enableFloat16); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_enable_float16_002 + * @tc.desc: Verify the success of the OH_NNCompilation_EnableFloat16 function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_enable_float16_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + bool enableFloat16 = true; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_EnableFloat16(nnCompilation, enableFloat16); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_build_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_Build function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_build_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPerformance(OH_NN_PERFORMANCE_EXTREME)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPriority(OH_NN_PRIORITY_HIGH)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetEnableFp16(true)); + + OH_NN_ReturnCode ret = OH_NNCompilation_Build(nnCompilation); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_002 + * @tc.desc: Verify the success of the OH_NNCompilation_Build function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_build_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPerformance(OH_NN_PERFORMANCE_EXTREME)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPriority(OH_NN_PRIORITY_HIGH)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetEnableFp16(true)); + + OH_NN_ReturnCode ret = OH_NNCompilation_Build(nnCompilation); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_destroy_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_destroy_001, testing::ext::TestSize.Level0) +{ + OH_NNCompilation** pCompilation = nullptr; + OH_NNCompilation_Destroy(pCompilation); + EXPECT_EQ(nullptr, pCompilation); +} + +/* + * @tc.name: compilation_destroy_002 + * @tc.desc: Verify the *OH_NNCompilation is nullptr of the OH_NNCompilation_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_destroy_002, testing::ext::TestSize.Level0) +{ + OH_NNCompilation* compilation = nullptr; + OH_NNCompilation** pCompilation = &compilation; + OH_NNCompilation_Destroy(pCompilation); + EXPECT_EQ(nullptr, compilation); +} + +/* + * @tc.name: compilation_destroy_003 + * @tc.desc: Verify the normal model of the OH_NNCompilation_Destroy function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_destroy_003, testing::ext::TestSize.Level0) +{ + InnerModel* innerModel = new InnerModel(); + EXPECT_NE(nullptr, innerModel); + Compilation* compilation = new(std::nothrow) Compilation(innerModel); + EXPECT_NE(nullptr, compilation); + OH_NNCompilation* nnCompilation = reinterpret_cast(compilation); + OH_NNCompilation_Destroy(&nnCompilation); + EXPECT_EQ(nullptr, nnCompilation); +} + +/** + * @tc.name: excutor_construct_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNExecutor_Construct function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_construct_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetEnableFp16(true)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPerformance(OH_NN_PERFORMANCE_EXTREME)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPriority(OH_NN_PRIORITY_HIGH)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.Build()); + + OH_NNCompilation* nnCompilation = nullptr; + OH_NNExecutor* executor = OH_NNExecutor_Construct(nnCompilation); + EXPECT_EQ(nullptr, executor); +} + +/** + * @tc.name: excutor_construct_002 + * @tc.desc: Verify the not OH_NNCompilation_Build before creating executor of the OH_NNExecutor_Construct function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_construct_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + OH_NNExecutor * executor = OH_NNExecutor_Construct(nnCompilation); + EXPECT_EQ(nullptr, executor); +} + +/** + * @tc.name: excutor_construct_003 + * @tc.desc: Verify the success of the OH_NNExecutor_Construct function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_construct_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPerformance(OH_NN_PERFORMANCE_EXTREME)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPriority(OH_NN_PRIORITY_HIGH)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetEnableFp16(true)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.Build()); + + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + OH_NNExecutor * executor = OH_NNExecutor_Construct(nnCompilation); + EXPECT_NE(nullptr, executor); +} + +/** + * @tc.name: excutor_setinput_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_SetInput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_001, testing::ext::TestSize.Level0) +{ + SetTensor(); + + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + const void *buffer = input; + size_t length = 2 * sizeof(float); + uint32_t inputIndex = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInput(nullptr, inputIndex, &m_tensor, buffer, length)); +} + +/** + * @tc.name: excutor_setinput_002 + * @tc.desc: Verify the OH_NN_Tensor is nullptr of the OH_NNExecutor_SetInput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_002, 
testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + const void *buffer = input; + size_t length = 2 * sizeof(float); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInput(nnExecutor, inputIndex, nullptr, buffer, length)); +} + +/** + * @tc.name: excutor_setinput_003 + * @tc.desc: Verify the data is nullptr of the OH_NNExecutor_SetInput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetTensor(); + + uint32_t inputIndex = 0; + const void *buffer = nullptr; + size_t length = 2 * sizeof(float); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInput(nnExecutor, inputIndex, &m_tensor, buffer, length)); +} + +/** + * @tc.name: excutor_setinput_004 + * @tc.desc: Verify the length is 0 of the OH_NNExecutor_SetInput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + SetTensor(); + + size_t length = 0; + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + const void *buffer = input; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInput(nnExecutor, inputIndex, &m_tensor, buffer, length)); +} + +/** + * @tc.name: excutor_setinput_005 + * @tc.desc: Verify the success of the OH_NNExecutor_SetInput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + SetTensor(); + + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + const void *buffer = input; + size_t length = 9 * sizeof(int32_t); + OH_NN_ReturnCode ret = OH_NNExecutor_SetInput(nnExecutor, inputIndex, &m_tensor, buffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: excutor_setoutput_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_SetOutput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_001, testing::ext::TestSize.Level0) +{ + uint32_t outputIndex = 0; + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void *buffer = input; + size_t length = 9 * sizeof(int32_t); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(nullptr, outputIndex, buffer, length)); +} + +/** + * @tc.name: excutor_setoutput_002 + * @tc.desc: Verify the data is nullptr of the OH_NNExecutor_SetOutput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + 
Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + void *buffer = nullptr; + size_t length = 9 * sizeof(int32_t); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(nnExecutor, outputIndex, buffer, length)); +} + +/** + * @tc.name: excutor_setoutput_003 + * @tc.desc: Verify the length is 0 of the OH_NNExecutor_SetOutput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void *buffer = input; + size_t length = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(nnExecutor, outputIndex, buffer, length)); +} + +/** + * @tc.name: excutor_setoutput_004 + * @tc.desc: Verify the success of the OH_NNExecutor_SetOutput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void *buffer = input; + size_t length = 9 * sizeof(int32_t); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetOutput(nnExecutor, outputIndex, buffer, length)); +} + +/** + * @tc.name: excutor_getoutputshape_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_GetOutputShape function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = nullptr; + + SetInputAndOutput(executor); + + int32_t* ptr = nullptr; + int32_t** shape = &ptr; + uint32_t length = 2; + uint32_t outputIndex = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, + shape, &length)); +} + +/** + * @tc.name: excutor_getoutputshape_002 + * @tc.desc: Verify the shape is nullptr of the OH_NNExecutor_GetOutputShape function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetInputAndOutput(executor); + + uint32_t outputIndex = 0; + int32_t** shape = nullptr; + uint32_t length = 2; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, + shape, &length)); +} + +/** + * @tc.name: excutor_getoutputshape_003 + * @tc.desc: Verify the *shape is not nullptr of the OH_NNExecutor_GetOutputShape function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + 
OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetInputAndOutput(executor); + + int32_t expectDim[2] = {3, 3}; + int32_t* ptr = expectDim; + int32_t** shape = &ptr; + uint32_t length = 2; + uint32_t outputIndex = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, + shape, &length)); +} + +/** + * @tc.name: excutor_getoutputshape_004 + * @tc.desc: Verify the length is nullptr of the OH_NNExecutor_GetOutputShape function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetInputAndOutput(executor); + + int32_t* ptr = nullptr; + int32_t** shape = &ptr; + uint32_t outputIndex = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, shape, nullptr)); +} + +/** + * @tc.name: excutor_getoutputshape_005 + * @tc.desc: Verify the success of the OH_NNExecutor_GetOutputShape function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetInputAndOutput(executor); + + int32_t* ptr = nullptr; + int32_t** shape = &ptr; + uint32_t length = 2; + uint32_t outputIndex = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, shape, &length)); +} + +/** + * @tc.name: excutor_run_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_Run function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_run_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_Run(nnExecutor)); +} + +/** + * @tc.name: excutor_run_002 + * @tc.desc: Verify the success of the OH_NNExecutor_Run function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_run_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t index = 0; + size_t length = 9 * sizeof(int32_t); + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void *buffer = input; + + SetTensor(); + EXPECT_EQ(OH_NN_SUCCESS, executor.SetInput(index, m_tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executor.SetOutput(index, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(nnExecutor)); +} + +/* + * @tc.name: executor_allocate_input_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_AllocateInputMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + uint32_t outputIndex = 0; + size_t length = 9 * sizeof(float); + + OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, outputIndex, length); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_input_memory_002 + * @tc.desc: Verify the passed length equals 0 of the OH_NNExecutor_AllocateInputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + size_t length = 0; + + OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, outputIndex, length); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_input_memory_003 + * @tc.desc: Verify the error when creating input memory in executor of the OH_NNExecutor_AllocateInputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 6; + size_t length = 9 * sizeof(float); + + OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, outputIndex, length); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_input_memory_004 + * @tc.desc: Verify the success of the OH_NNExecutor_AllocateInputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + size_t length = 9 * sizeof(float); + + OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, outputIndex, length); + EXPECT_NE(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_output_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_AllocateOutputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + uint32_t outputIndex = 0; + size_t length = 9 * sizeof(float); + + OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_output_memory_002 + * @tc.desc: Verify the passed length equals 0 of the OH_NNExecutor_AllocateOutputMemory function. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_002, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t outputIndex = 0;
+    size_t length = 0;
+
+    OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/*
+ * @tc.name: executor_allocate_output_memory_003
+ * @tc.desc: Verify the error when creating output memory in executor of the OH_NNExecutor_AllocateOutputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_003, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t outputIndex = 6;
+    size_t length = 9 * sizeof(float);
+
+    OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/*
+ * @tc.name: executor_allocate_output_memory_004
+ * @tc.desc: Verify the success of the OH_NNExecutor_AllocateOutputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_004, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel));
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
+
+    uint32_t outputIndex = 0;
+    size_t length = 9 * sizeof(float);
+
+    OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length);
+    EXPECT_NE(nullptr, ret);
+}
+
+/*
+ * @tc.name: executor_destroy_input_memory_001
+ * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_DestroyInputMemory function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_001, testing::ext::TestSize.Level0)
+{
+    InnerModel innerModel;
+    BuildModelGraph(innerModel);
+    Compilation innerCompilation(&innerModel);
+    Executor executor(&innerCompilation);
+    OH_NNExecutor* nnExecutor = nullptr;
+
+    uint32_t inputIndex = 0;
+    float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* const data = dataArry;
+    OH_NN_Memory memory = {data, 9 * sizeof(float)};
+    OH_NN_Memory* pMemory = &memory;
+    size_t length = 9 * sizeof(float);
+    EXPECT_EQ(OH_NN_SUCCESS, executor.CreateInputMemory(inputIndex, length, &pMemory));
+    OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, &pMemory);
+    EXPECT_EQ(nullptr, nnExecutor);
+}
+
+/*
+ * @tc.name: executor_destroy_input_memory_002
+ * @tc.desc: Verify the memory is nullptr of the OH_NNExecutor_DestroyInputMemory function.
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildModelGraph(innerModel); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + OH_NN_Memory** memory = nullptr; + OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, memory); + EXPECT_EQ(nullptr, memory); +} + +/* + * @tc.name: executor_destroy_input_memory_003 + * @tc.desc: Verify the *memory is nullptr of the OH_NNExecutor_DestroyInputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildModelGraph(innerModel); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + OH_NN_Memory* memory = nullptr; + OH_NN_Memory** pMemory = &memory; + OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, pMemory); + EXPECT_EQ(nullptr, memory); +} + +/* + * @tc.name: executor_destroy_input_memory_004 + * @tc.desc: Verify the error happened when destroying input memory of the OH_NNExecutor_DestroyInputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildModelGraph(innerModel); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 6; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_Memory* pMemory = &memory; + OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, &pMemory); + EXPECT_NE(nullptr, pMemory); +} + +/* + * @tc.name: executor_destroy_input_memory_005 + * @tc.desc: Verify the success of the OH_NNExecutor_DestroyInputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildModelGraph(innerModel); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_Memory* pMemory = &memory; + size_t length = 9 * sizeof(float); + EXPECT_EQ(OH_NN_SUCCESS, executor.CreateInputMemory(inputIndex, length, &pMemory)); + OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, &pMemory); + EXPECT_EQ(nullptr, pMemory); +} + +/* + * @tc.name: executor_destroy_output_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_DestroyOutputMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + uint32_t outputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_Memory* pMemory = &memory; + OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, &pMemory); + EXPECT_EQ(nullptr, nnExecutor); +} + +/* + * @tc.name: executor_destroy_output_memory_002 + * @tc.desc: Verify the memory is nullptr of the OH_NNExecutor_DestroyOutputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + OH_NN_Memory** memory = nullptr; + OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, memory); + EXPECT_EQ(nullptr, memory); +} + +/* + * @tc.name: executor_destroy_output_memory_003 + * @tc.desc: Verify the *memory is nullptr of the OH_NNExecutor_DestroyOutputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + OH_NN_Memory* memory = nullptr; + OH_NN_Memory** pMemory = &memory; + OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, pMemory); + EXPECT_EQ(nullptr, memory); +} + +/* + * @tc.name: executor_destroy_output_memory_004 + * @tc.desc: Verify the error happened when destroying output memory of the OH_NNExecutor_DestroyOutputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 6; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_Memory* pMemory = &memory; + OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, &pMemory); + EXPECT_NE(nullptr, pMemory); +} + +/* + * @tc.name: executor_destroy_output_memory_005 + * @tc.desc: Verify the success of the OH_NNExecutor_DestroyOutputMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_Memory* pMemory = &memory; + size_t length = 9 * sizeof(float); + uint32_t outputIndex = 0; + EXPECT_EQ(OH_NN_SUCCESS, executor.CreateOutputMemory(outputIndex, length, &pMemory)); + OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, &pMemory); + EXPECT_EQ(nullptr, pMemory); +} + +/* + * @tc.name: executor_set_input_with_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_SetInputWithMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + + SetTensor(); + + uint32_t inputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, &m_tensor, &memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_with_memory_002 + * @tc.desc: Verify the operand is nullptr of the OH_NNExecutor_SetInputWithMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + OH_NN_Tensor* operand = nullptr; + + uint32_t inputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, operand, &memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_with_memory_003 + * @tc.desc: Verify the memory is nullptr of the OH_NNExecutor_SetInputWithMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetTensor(); + + uint32_t inputIndex = 0; + OH_NN_Memory* memory = nullptr; + OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, &m_tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_with_memory_004 + * @tc.desc: Verify the success of the OH_NNExecutor_SetInputWithMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetTensor(); + + uint32_t inputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, &m_tensor, &memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + + +/* + * @tc.name: executor_set_output_with_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_SetOutputWithMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + uint32_t outputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_ReturnCode ret = OH_NNExecutor_SetOutputWithMemory(nnExecutor, outputIndex, &memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_with_memory_002 + * @tc.desc: Verify the memory is nullptr of the OH_NNExecutor_SetOutputWithMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + OH_NN_Memory* memory = nullptr; + OH_NN_ReturnCode ret = OH_NNExecutor_SetOutputWithMemory(nnExecutor, outputIndex, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_with_memory_003 + * @tc.desc: Verify the success of the OH_NNExecutor_SetOutputWithMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_ReturnCode ret = OH_NNExecutor_SetOutputWithMemory(nnExecutor, outputIndex, &memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_destroy_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor** pExecutor = nullptr; + OH_NNExecutor_Destroy(pExecutor); + EXPECT_EQ(nullptr, pExecutor); +} + +/* + * @tc.name: executor_destroy_002 + * @tc.desc: Verify the *OH_NNExecutor is nullptr of the OH_NNExecutor_Destroy function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_002, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + OH_NNExecutor** pExecutor = &nnExecutor; + OH_NNExecutor_Destroy(pExecutor); + EXPECT_EQ(nullptr, nnExecutor); +} + +/* + * @tc.name: executor_destroy_003 + * @tc.desc: Verify the normal model of the OH_NNExecutor_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_003, testing::ext::TestSize.Level0) +{ + InnerModel* innerModel = new InnerModel(); + EXPECT_NE(nullptr, innerModel); + Compilation* innerCompilation = new(std::nothrow) Compilation(innerModel); + EXPECT_NE(nullptr, innerCompilation); + Executor* executor = new(std::nothrow) Executor(innerCompilation); + EXPECT_NE(nullptr, executor); + + OH_NNExecutor* nnExecutor = reinterpret_cast(executor); + OH_NNExecutor_Destroy(&nnExecutor); + EXPECT_EQ(nullptr, nnExecutor); +} + +/* + * @tc.name: device_get_all_devices_id_001 + * @tc.desc: Verify the allDevicesID is nullptr of the OH_NNDevice_GetAllDevicesID function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_001, testing::ext::TestSize.Level0) +{ + const size_t** allDevicesId = nullptr; + uint32_t deviceCount = 1; + uint32_t* pDeviceCount = &deviceCount; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(allDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_all_devices_id_002 + * @tc.desc: Verify the *allDevicesID is not nullptr of the OH_NNDevice_GetAllDevicesID function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_002, testing::ext::TestSize.Level0) +{ + const size_t devicesId = 1; + const size_t* allDevicesId = &devicesId; + const size_t** pAllDevicesId = &allDevicesId; + uint32_t deviceCount = 1; + uint32_t* pDeviceCount = &deviceCount; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(pAllDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_all_devices_id_003 + * @tc.desc: Verify the deviceCount is nullptr of the OH_NNDevice_GetAllDevicesID function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_003, testing::ext::TestSize.Level0) +{ + const size_t* allDevicesId = nullptr; + const size_t** pAllDevicesId = &allDevicesId; + uint32_t* pDeviceCount = nullptr; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(pAllDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_all_devices_id_004 + * @tc.desc: Verify the get no device of the OH_NNDevice_GetAllDevicesID function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_004, testing::ext::TestSize.Level0) +{ + const size_t* allDevicesId = nullptr; + const size_t** pAllDevicesId = &allDevicesId; + uint32_t deviceCount = 1; + uint32_t* pDeviceCount = &deviceCount; + OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(pAllDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: device_get_all_devices_id_005 + * @tc.desc: Verify the success of the OH_NNDevice_GetAllDevicesID function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_005, testing::ext::TestSize.Level0) +{ + const size_t* allDevicesId = nullptr; + const size_t** pAllDevicesId = &allDevicesId; + uint32_t deviceCount = 1; + uint32_t* pDeviceCount = &deviceCount; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(pAllDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: device_get_name_001 + * @tc.desc: Verify the name is nullptr of the OH_NNDevice_GetName function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_name_001, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + const char **name = nullptr; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, name); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_name_002 + * @tc.desc: Verify the *name is not nullptr of the OH_NNDevice_GetName function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_name_002, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + const char* name = "diviceId"; + const char** pName = &name; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, pName); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_name_003 + * @tc.desc: Verify the error happened when getting name of deviceID of the OH_NNDevice_GetName function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_name_003, testing::ext::TestSize.Level0) +{ + size_t deviceID = 0; + const char* name = nullptr; + const char** pName = &name; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, pName); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: device_get_name_004 + * @tc.desc: Verify the success of the OH_NNDevice_GetName function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_name_004, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + const char* name = nullptr; + const char** pName = &name; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, pName); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: device_get_type_001 + * @tc.desc: Verify the device is nullptr of the OH_NNDevice_GetType function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_type_001, testing::ext::TestSize.Level0) +{ + size_t deviceID = 0; + OH_NN_DeviceType deviceType = OH_NN_CPU; + OH_NN_DeviceType* pDeviceType = &deviceType; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, pDeviceType); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_type_002 + * @tc.desc: Verify the OH_NN_DeviceType is nullptr of the OH_NNDevice_GetType function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_type_002, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + OH_NN_DeviceType* pDeviceType = nullptr; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, pDeviceType); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_type_003 + * @tc.desc: Verify the error happened when getting name of deviceID of the OH_NNDevice_GetType function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_type_003, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + OH_NN_DeviceType deviceType = OH_NN_OTHERS; + OH_NN_DeviceType* pDeviceType = &deviceType; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, pDeviceType); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, ret); +} + +/* + * @tc.name: device_get_type_004 + * @tc.desc: Verify the success of the OH_NNDevice_GetType function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_type_004, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + OH_NN_DeviceType deviceType = OH_NN_CPU; + OH_NN_DeviceType* pDeviceType = &deviceType; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, pDeviceType); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} +} // namespace Unittest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.h b/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.h new file mode 100644 index 0000000..61f1ed2 --- /dev/null +++ b/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_UNITTEST_H +#define NEURAL_NETWORK_RUNTIME_UNITTEST_H + +#include + +#include "interfaces/kits/c/neural_network_runtime.h" +#include "frameworks/native/inner_model.h" +#include "frameworks/native/executor.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Unittest { +class NeuralNetworkRuntimeTest : public testing::Test { +public: + OH_NN_ReturnCode BuildModelGraph(InnerModel& innerModel); + void InitIndices(); + void AddModelTensor(InnerModel& innerModel); + void SetInnerBuild(InnerModel& innerModel); + void SetExecutor(Executor& executor); + void SetInputAndOutput(Executor& executor); + void SetTensor(); + +public: + OH_NN_UInt32Array m_inputIndices; + OH_NN_UInt32Array m_outputIndices; + OH_NN_UInt32Array m_paramIndices; + OH_NN_Tensor m_tensor; + + uint32_t m_inputIndexs[2]{0, 1}; + uint32_t m_outputIndexs[1]{2}; + uint32_t m_paramIndexs[1]{3}; +}; +} // namespace Unittest +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_UNITTEST_H diff --git a/test/unittest/components/transform/transform_test.cpp b/test/unittest/components/transform/transform_test.cpp new file mode 100644 index 0000000..ae9e4ca --- /dev/null +++ b/test/unittest/components/transform/transform_test.cpp @@ -0,0 +1,912 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "frameworks/native/transform.h" +#include "frameworks/native/memory_manager.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class TransformTestTest : public testing::Test { +public: + TransformTestTest() = default; + ~TransformTestTest() = default; +}; + +/** + * @tc.name: transform_transhdidevicetype_001 + * @tc.desc: Verify the TransHDIDeviceType function return OH_NN_CPU + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transhdidevicetype_001, TestSize.Level0) +{ + V1_0::DeviceType iDeviceType = V1_0::DeviceType::CPU; + OH_NN_DeviceType result = HDIToNN::TransHDIDeviceType(iDeviceType); + EXPECT_EQ(OH_NN_CPU, result); +} + +/** + * @tc.name: transform_transhdidevicetype_002 + * @tc.desc: Verify the TransHDIDeviceType function return OH_NN_GPU + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transhdidevicetype_002, TestSize.Level0) +{ + V1_0::DeviceType iDeviceType = V1_0::DeviceType::GPU; + OH_NN_DeviceType result = HDIToNN::TransHDIDeviceType(iDeviceType); + EXPECT_EQ(OH_NN_GPU, result); +} + +/** + * @tc.name: transform_transhdidevicetype_003 + * @tc.desc: Verify the TransHDIDeviceType function return OH_NN_ACCELERATOR + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transhdidevicetype_003, TestSize.Level0) +{ + V1_0::DeviceType iDeviceType = V1_0::DeviceType::ACCELERATOR; + OH_NN_DeviceType result = HDIToNN::TransHDIDeviceType(iDeviceType); + EXPECT_EQ(OH_NN_ACCELERATOR, result); +} + +/** + * @tc.name: transform_transhdidevicetype_004 + * @tc.desc: Verify the TransHDIDeviceType function return OH_NN_OTHERS + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transhdidevicetype_004, TestSize.Level0) +{ + V1_0::DeviceType iDeviceType = V1_0::DeviceType::OTHER; + OH_NN_DeviceType result = HDIToNN::TransHDIDeviceType(iDeviceType); + EXPECT_EQ(OH_NN_OTHERS, result); +} + +/** + * @tc.name: transform_transhdidevicestatus_001 + * @tc.desc: Verify the TransHDIDeviceStatus function return AVAILABLE + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transhdidevicestatus_001, TestSize.Level0) +{ + V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::AVAILABLE; + DeviceStatus result = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); + EXPECT_EQ(DeviceStatus::AVAILABLE, result); +} + +/** + * @tc.name: transform_transhdidevicestatus_002 + * @tc.desc: Verify the TransHDIDeviceStatus function return BUSY. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transhdidevicestatus_002, TestSize.Level0) +{ + V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::BUSY; + DeviceStatus result = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); + EXPECT_EQ(DeviceStatus::BUSY, result); +} + +/** + * @tc.name: transform_transhdidevicestatus_003 + * @tc.desc: Verify the TransHDIDeviceStatus function return OFFLINE. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transhdidevicestatus_003, TestSize.Level0) +{ + V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::OFFLINE; + DeviceStatus result = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); + EXPECT_EQ(DeviceStatus::OFFLINE, result); +} + +/** + * @tc.name: transform_transhdidevicestatus_004 + * @tc.desc: Verify the TransHDIDeviceStatus function return UNKNOWN. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transhdidevicestatus_004, TestSize.Level0) +{ + V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::UNKNOWN; + DeviceStatus result = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); + EXPECT_EQ(DeviceStatus::UNKNOWN, result); +} + +/** + * @tc.name: transform_transperformancemode_001 + * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_LOW. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transperformancemode_001, TestSize.Level0) +{ + OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_LOW; + V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); + EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_LOW, result); +} + +/** + * @tc.name: transform_transperformancemode_002 + * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_MEDIUM. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transperformancemode_002, TestSize.Level0) +{ + OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_MEDIUM; + V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); + EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_MEDIUM, result); +} + +/** + * @tc.name: transform_transperformancemode_003 + * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_HIGH. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transperformancemode_003, TestSize.Level0) +{ + OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_HIGH; + V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); + EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_HIGH, result); +} + +/** + * @tc.name: transform_transperformancemode_004 + * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_EXTREME. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transperformancemode_004, TestSize.Level0) +{ + OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_EXTREME; + V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); + EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_EXTREME, result); +} + +/** + * @tc.name: transform_transperformancemode_005 + * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_NONE. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transperformancemode_005, TestSize.Level0) +{ + OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_NONE; + V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); + EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_NONE, result); +} + +/** + * @tc.name: transform_transpriority_001 + * @tc.desc: Verify the TransPriority function return PRIORITY_LOW. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transpriority_001, TestSize.Level0) +{ + OH_NN_Priority priority = OH_NN_PRIORITY_LOW; + V1_0::Priority result = NNToHDI::TransPriority(priority); + EXPECT_EQ(V1_0::Priority::PRIORITY_LOW, result); +} + +/** + * @tc.name: transform_transpriority_002 + * @tc.desc: Verify the TransPriority function return PRIORITY_MEDIUM. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transpriority_002, TestSize.Level0) +{ + OH_NN_Priority priority = OH_NN_PRIORITY_MEDIUM; + V1_0::Priority result = NNToHDI::TransPriority(priority); + EXPECT_EQ(V1_0::Priority::PRIORITY_MEDIUM, result); +} + +/** + * @tc.name: transform_transpriority_003 + * @tc.desc: Verify the TransPriority function return PRIORITY_HIGH. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transpriority_003, TestSize.Level0) +{ + OH_NN_Priority priority = OH_NN_PRIORITY_HIGH; + V1_0::Priority result = NNToHDI::TransPriority(priority); + EXPECT_EQ(V1_0::Priority::PRIORITY_HIGH, result); +} + +/** + * @tc.name: transform_transdatatype_001 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_BOOL. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_001, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_BOOL; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_BOOL, result); +} + +/** + * @tc.name: transform_transdatatype_002 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_002, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT8; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT8, result); +} + +/** + * @tc.name: transform_transdatatype_003 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_003, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT16; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT16, result); +} + +/** + * @tc.name: transform_transdatatype_004 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_004, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT32; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT32, result); +} + +/** + * @tc.name: transform_transdatatype_005 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_005, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT64; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT64, result); +} + +/** + * @tc.name: transform_transdatatype_006 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_006, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT8; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_UINT8, result); +} + +/** + * @tc.name: transform_transdatatype_007 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_007, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT16; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_UINT16, result); +} + +/** + * @tc.name: transform_transdatatype_008 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT32. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_008, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT32; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_UINT32, result); +} + +/** + * @tc.name: transform_transdatatype_009 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_009, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT64; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_UINT64, result); +} + +/** + * @tc.name: transform_transdatatype_010 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_010, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT16; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_FLOAT16, result); +} + +/** + * @tc.name: transform_transdatatype_011 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_011, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT32; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_FLOAT32, result); +} + +/** + * @tc.name: transform_transdatatype_012 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UNKNOWN. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_012, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UNKNOWN; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_UNKNOWN, result); +} + +/** + * @tc.name: transform_transdatatype_013 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transdatatype_013, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT64; + V1_0::DataType result = NNToHDI::TransDataType(dataType); + EXPECT_EQ(V1_0::DataType::DATA_TYPE_FLOAT64, result); +} + +/** + * @tc.name: transform_transformat_001 + * @tc.desc: Verify the TransFormat function return FORMAT_NCHW. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transformat_001, TestSize.Level0) +{ + OH_NN_Format format = OH_NN_FORMAT_NCHW; + V1_0::Format result = NNToHDI::TransFormat(format); + EXPECT_EQ(V1_0::Format::FORMAT_NCHW, result); +} + +/** + * @tc.name: transform_transformat_002 + * @tc.desc: Verify the TransFormat function return FORMAT_NHWC. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_transformat_002, TestSize.Level0) +{ + OH_NN_Format format = OH_NN_FORMAT_NHWC; + V1_0::Format result = NNToHDI::TransFormat(format); + EXPECT_EQ(V1_0::Format::FORMAT_NHWC, result); +} + +/** + * @tc.name: transform_transformat_003 + * @tc.desc: Verify the TransFormat function return FORMAT_NONE. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransformTestTest, transform_transformat_003, TestSize.Level0)
+{
+    OH_NN_Format format = OH_NN_FORMAT_NONE;
+    V1_0::Format result = NNToHDI::TransFormat(format);
+    EXPECT_EQ(V1_0::Format::FORMAT_NONE, result);
+}
+
+/**
+ * @tc.name: transform_transiotensor_001
+ * @tc.desc: Verify the TransIOTensor function return int8
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransformTestTest, transform_transiotensor_001, TestSize.Level0)
+{
+    IOTensor tensor;
+    tensor.dataType = OH_NN_INT8;
+    V1_0::IOTensor result = NNToHDI::TransIOTensor(tensor);
+    EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT8, result.dataType);
+}
+
+/**
+ * @tc.name: transform_gettypesize_001
+ * @tc.desc: Verify the GetTypeSize function return 1.
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransformTestTest, transform_gettypesize_001, TestSize.Level0)
+{
+    OH_NN_DataType dataType = OH_NN_BOOL;
+    uint32_t result = GetTypeSize(dataType);
+    EXPECT_EQ(static_cast<uint32_t>(1), result);
+}
+
+/**
+ * @tc.name: transform_gettypesize_002
+ * @tc.desc: Verify the GetTypeSize function return 2.
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransformTestTest, transform_gettypesize_002, TestSize.Level0)
+{
+    OH_NN_DataType dataType = OH_NN_INT16;
+    uint32_t result = GetTypeSize(dataType);
+    EXPECT_EQ(static_cast<uint32_t>(2), result);
+}
+
+/**
+ * @tc.name: transform_gettypesize_003
+ * @tc.desc: Verify the GetTypeSize function return 4.
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransformTestTest, transform_gettypesize_003, TestSize.Level0)
+{
+    OH_NN_DataType dataType = OH_NN_INT32;
+    uint32_t result = GetTypeSize(dataType);
+    EXPECT_EQ(static_cast<uint32_t>(4), result);
+}
+
+/**
+ * @tc.name: transform_gettypesize_004
+ * @tc.desc: Verify the GetTypeSize function return 8.
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransformTestTest, transform_gettypesize_004, TestSize.Level0)
+{
+    OH_NN_DataType dataType = OH_NN_INT64;
+    uint32_t result = GetTypeSize(dataType);
+    EXPECT_EQ(static_cast<uint32_t>(8), result);
+}
+
+/**
+ * @tc.name: transform_gettypesize_005
+ * @tc.desc: Verify the GetTypeSize function return 0.
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransformTestTest, transform_gettypesize_005, TestSize.Level0)
+{
+    OH_NN_DataType dataType = OH_NN_UNKNOWN;
+    uint32_t result = GetTypeSize(dataType);
+    EXPECT_EQ(static_cast<uint32_t>(0), result);
+}
+
+/**
+ * @tc.name: transform_nntoms_transformdatatype_001
+ * @tc.desc: Verify the TransformDataType function return DATA_TYPE_BOOL.
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_001, TestSize.Level0)
+{
+    OH_NN_DataType dataType = OH_NN_BOOL;
+    mindspore::lite::DataType result = NNToMS::TransformDataType(dataType);
+    EXPECT_EQ(mindspore::lite::DATA_TYPE_BOOL, result);
+}
+
+/**
+ * @tc.name: transform_nntoms_transformdatatype_002
+ * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT8.
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_002, TestSize.Level0)
+{
+    OH_NN_DataType dataType = OH_NN_INT8;
+    mindspore::lite::DataType result = NNToMS::TransformDataType(dataType);
+    EXPECT_EQ(mindspore::lite::DATA_TYPE_INT8, result);
+}
+
+/**
+ * @tc.name: transform_nntoms_transformdatatype_003
+ * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT16.
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_003, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT16; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_INT16, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_004 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_004, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT32; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_INT32, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_005 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_005, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT64; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_INT64, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_006 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_006, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT8; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UINT8, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_007 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_007, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT16; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UINT16, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_008 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_008, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT32; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UINT32, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_009 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_009, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT64; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UINT64, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_010 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_010, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT16; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_FLOAT16, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_011 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT32. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_011, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT32; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_FLOAT32, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_012 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UNKNOWN. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_012, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UNKNOWN; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UNKNOWN, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_013 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT64 + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_013, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT64; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_FLOAT64, result); +} + +/** + * @tc.name: transform_nntoms_transformformat_001 + * @tc.desc: Verify the TransFormat function return FORMAT_NCHW. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformformat_001, TestSize.Level0) +{ + OH_NN_Format format = OH_NN_FORMAT_NCHW; + mindspore::lite::Format result = NNToMS::TransformFormat(format); + EXPECT_EQ(mindspore::lite::FORMAT_NCHW, result); +} + +/** + * @tc.name: transform_nntoms_transformformat_002 + * @tc.desc: Verify the TransFormat function return FORMAT_NHWC. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformformat_002, TestSize.Level0) +{ + OH_NN_Format format = OH_NN_FORMAT_NHWC; + mindspore::lite::Format result = NNToMS::TransformFormat(format); + EXPECT_EQ(mindspore::lite::FORMAT_NHWC, result); +} + +/** + * @tc.name: transform_nntoms_transformformat_003 + * @tc.desc: Verify the TransFormat function return FORMAT_NHWC. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformformat_003, TestSize.Level0) +{ + OH_NN_Format format = OH_NN_FORMAT_NONE; + mindspore::lite::Format result = NNToMS::TransformFormat(format); + EXPECT_EQ(mindspore::lite::FORMAT_NHWC, result); +} + +/** + * @tc.name: transform_nntoms_transformfusiontype_001 + * @tc.desc: Verify the TransFormat function return ACTIVATION_TYPE_NO_ACTIVATION. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformfusiontype_001, TestSize.Level0) +{ + OH_NN_FuseType type = OH_NN_FUSED_NONE; + mindspore::lite::ActivationType result = NNToMS::TransfromFusionType(type); + EXPECT_EQ(mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION, result); +} + +/** + * @tc.name: transform_nntoms_transformfusiontype_002 + * @tc.desc: Verify the TransFormat function return ACTIVATION_TYPE_RELU. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformfusiontype_002, TestSize.Level0) +{ + OH_NN_FuseType type = OH_NN_FUSED_RELU; + mindspore::lite::ActivationType result = NNToMS::TransfromFusionType(type); + EXPECT_EQ(mindspore::lite::ACTIVATION_TYPE_RELU, result); +} + +/** + * @tc.name: transform_nntoms_transformfusiontype_003 + * @tc.desc: Verify the TransFormat function return ACTIVATION_TYPE_RELU6. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformfusiontype_003, TestSize.Level0) +{ + OH_NN_FuseType type = OH_NN_FUSED_RELU6; + mindspore::lite::ActivationType result = NNToMS::TransfromFusionType(type); + EXPECT_EQ(mindspore::lite::ACTIVATION_TYPE_RELU6, result); +} + +/** + * @tc.name: transform_nntoms_transformquanttype_001 + * @tc.desc: Verify the TransFormat function return QUANT_TYPE_NONE. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformquanttype_001, TestSize.Level0) +{ + OHOS::NeuralNetworkRuntime::Ops::OpsQuantType type = OHOS::NeuralNetworkRuntime::Ops::OpsQuantType::QUANT_NONE; + mindspore::lite::QuantType result = NNToMS::TransformQuantType(type); + EXPECT_EQ(mindspore::lite::QUANT_TYPE_NONE, result); +} + +/** + * @tc.name: transform_nntoms_transformquanttype_002 + * @tc.desc: Verify the TransFormat function return QUANT_TYPE_ALL. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformquanttype_002, TestSize.Level0) +{ + OHOS::NeuralNetworkRuntime::Ops::OpsQuantType type = OHOS::NeuralNetworkRuntime::Ops::OpsQuantType::QUANT_ALL; + mindspore::lite::QuantType result = NNToMS::TransformQuantType(type); + EXPECT_EQ(mindspore::lite::QUANT_TYPE_ALL, result); +} + + +/** + * @tc.name: transform_mstonn_transformdatatype_001 + * @tc.desc: Verify the TransIOTensor function return OH_NN_BOOL. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_001, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_BOOL; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_BOOL, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_002 + * @tc.desc: Verify the TransDataType function return OH_NN_INT8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_002, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_INT8; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_INT8, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_003 + * @tc.desc: Verify the TransDataType function return OH_NN_INT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_003, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_INT16; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_INT16, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_004 + * @tc.desc: Verify the TransDataType function return OH_NN_INT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_004, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_INT32; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_INT32, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_005 + * @tc.desc: Verify the TransDataType function return OH_NN_INT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_005, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_INT64; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_INT64, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_006 + * @tc.desc: Verify the TransDataType function return OH_NN_UINT8. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_006, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UINT8; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UINT8, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_007 + * @tc.desc: Verify the TransDataType function return OH_NN_UINT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_007, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UINT16; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UINT16, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_008 + * @tc.desc: Verify the TransDataType function return OH_NN_UINT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_008, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UINT32; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UINT32, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_009 + * @tc.desc: Verify the TransDataType function return OH_NN_UINT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_009, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UINT64; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UINT64, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_010 + * @tc.desc: Verify the TransDataType function return OH_NN_FLOAT16 + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_010, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_FLOAT16; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_FLOAT16, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_011 + * @tc.desc: Verify the TransDataType function return OH_NN_FLOAT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_011, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_FLOAT32; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_FLOAT32, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_012 + * @tc.desc: Verify the TransDataType function return OH_NN_UNKNOWN. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_012, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UNKNOWN; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UNKNOWN, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_013 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT64 + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_013, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_FLOAT64; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_FLOAT64, result); +} + +/** + * @tc.name: transform_mstonn_transformquantparams_001 + * @tc.desc: Verify the TransformQuantParams function. 
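+ *           (Only the element count is checked: one MindSpore quant param in should yield one runtime quant param out.)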
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformquantparams_001, TestSize.Level0) +{ + std::vector msQuantParams = {{1, 1.0, 8}}; + std::vector result = MSToNN::TransformQuantParams(msQuantParams); + EXPECT_EQ(msQuantParams.size(), result.size()); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/inner_kits/BUILD.gn b/test/unittest/inner_kits/BUILD.gn new file mode 100644 index 0000000..bea6436 --- /dev/null +++ b/test/unittest/inner_kits/BUILD.gn @@ -0,0 +1,64 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build/test.gni") + +module_output_path = "neural_network_runtime/" + +config("module_private_config") { + visibility = [ ":*" ] + + include_dirs = [ + "//third_party/googletest/googlemock/include", + "//foundation/ai/neural_network_runtime", + "//foundation/communication/ipc/interfaces/innerkits/ipc_core/include", + "//third_party/mindspore/mindspore/lite/mindir/include" + ] + + cflags = [ + "-Wall", + "-Wextra", + "-Werror", + "--coverage", + ] + + ldflags = [ + "--coverage", + ] +} + +ohos_unittest("NeuralNetworkRuntimeInnerTest") { + module_out_path = module_output_path + sources = ["//foundation/ai/neural_network_runtime/test/unittest/inner_kits/neural_network_runtime_inner_test.cpp"] + + configs = [ ":module_private_config" ] + + deps = [ + "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "c_utils:utils", + "hdf_core:libhdf_utils", + "mindspore:mindir" + ] +} + +group("inner_kits_unittest") { + testonly = true + deps = [ ":NeuralNetworkRuntimeInnerTest" ] +} diff --git a/test/unittest/inner_kits/neural_network_runtime_inner_test.cpp b/test/unittest/inner_kits/neural_network_runtime_inner_test.cpp new file mode 100644 index 0000000..31fba4c --- /dev/null +++ b/test/unittest/inner_kits/neural_network_runtime_inner_test.cpp @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "neural_network_runtime_inner_test.h"
+
+#include "mindir.h"
+#include "frameworks/native/inner_model.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace Unittest {
+void NeuralNetworkRuntimeInnerTest::SetUpTestCase(void)
+{
+}
+
+void NeuralNetworkRuntimeInnerTest::TearDownTestCase(void)
+{
+}
+
+void NeuralNetworkRuntimeInnerTest::SetUp(void)
+{
+}
+
+void NeuralNetworkRuntimeInnerTest::TearDown(void)
+{
+}
+
+/*
+ * @tc.name: build_from_lite_graph_001
+ * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_BuildFromLiteGraph function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeInnerTest, build_from_lite_graph_001, testing::ext::TestSize.Level0)
+{
+    OH_NNModel* model = nullptr;
+    mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph;
+    EXPECT_NE(nullptr, liteGraph);
+    OH_NN_ReturnCode ret = OH_NNModel_BuildFromLiteGraph(model, liteGraph);
+    delete liteGraph;
+    liteGraph = nullptr;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: build_from_lite_graph_002
+ * @tc.desc: Verify the liteGraph is nullptr of the OH_NNModel_BuildFromLiteGraph function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeInnerTest, build_from_lite_graph_002, testing::ext::TestSize.Level0)
+{
+    OHOS::NeuralNetworkRuntime::InnerModel* innerModel = new (std::nothrow) OHOS::NeuralNetworkRuntime::InnerModel();
+    EXPECT_NE(nullptr, innerModel);
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(innerModel);
+    const void* liteGraph = nullptr;
+    OH_NN_ReturnCode ret = OH_NNModel_BuildFromLiteGraph(model, liteGraph);
+    delete innerModel;
+    innerModel = nullptr;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: build_from_lite_graph_003
+ * @tc.desc: Verify the success of the OH_NNModel_BuildFromLiteGraph function.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeInnerTest, build_from_lite_graph_003, testing::ext::TestSize.Level0)
+{
+    OHOS::NeuralNetworkRuntime::InnerModel* innerModel = new (std::nothrow) OHOS::NeuralNetworkRuntime::InnerModel();
+    EXPECT_NE(nullptr, innerModel);
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(innerModel);
+    mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph;
+    EXPECT_NE(nullptr, liteGraph);
+    liteGraph->name_ = "testGraph";
+    liteGraph->input_indices_ = {0};
+    liteGraph->output_indices_ = {1};
+    const std::vector<mindspore::lite::QuantParam> quant_params {};
+    for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) {
+        const std::vector<int32_t> dim = {3, 3};
+        const std::vector<uint8_t> data(36, 1);
+
+        liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_,
+            mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params));
+    }
+    for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) {
+        const std::vector<int32_t> dimOut = {3, 3};
+        const std::vector<uint8_t> dataOut(36, 1);
+        liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_,
+            mindspore::lite::DATA_TYPE_FLOAT32, dimOut, mindspore::lite::FORMAT_NCHW, dataOut, quant_params));
+    }
+    OH_NN_ReturnCode ret = OH_NNModel_BuildFromLiteGraph(model, liteGraph);
+    delete innerModel;
+    innerModel = nullptr;
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/*
+ * @tc.name: build_from_lite_graph_004
+ * @tc.desc: Verify that the liteGraph parameter passed to the OH_NNModel_BuildFromLiteGraph function is invalid.
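+ *           (The LiteGraph is given only a name, with no tensors or input/output indices, so the build is expected to fail.)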
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeInnerTest, build_from_lite_graph_004, testing::ext::TestSize.Level0)
+{
+    OHOS::NeuralNetworkRuntime::InnerModel* innerModel = new (std::nothrow) OHOS::NeuralNetworkRuntime::InnerModel();
+    EXPECT_NE(nullptr, innerModel);
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(innerModel);
+    mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph;
+    EXPECT_NE(nullptr, liteGraph);
+    liteGraph->name_ = "testGraph";
+    OH_NN_ReturnCode ret = OH_NNModel_BuildFromLiteGraph(model, liteGraph);
+    delete innerModel;
+    delete liteGraph;
+    innerModel = nullptr;
+    liteGraph = nullptr;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+} // namespace Unittest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/test/unittest/inner_kits/neural_network_runtime_inner_test.h b/test/unittest/inner_kits/neural_network_runtime_inner_test.h
new file mode 100644
index 0000000..03d7d92
--- /dev/null
+++ b/test/unittest/inner_kits/neural_network_runtime_inner_test.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NEURAL_NETWORK_RUNTIME_INNER_UNITTEST_H
+#define NEURAL_NETWORK_RUNTIME_INNER_UNITTEST_H
+
+#include "interfaces/innerkits/c/neural_network_runtime_inner.h"
+#include <gtest/gtest.h>
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace Unittest {
+class NeuralNetworkRuntimeInnerTest : public testing::Test {
+public:
+    static void SetUpTestCase(void);
+    static void TearDownTestCase(void);
+    void SetUp();
+    void TearDown();
+};
+} // namespace Unittest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
+
+#endif // NEURAL_NETWORK_RUNTIME_INNER_UNITTEST_H
\ No newline at end of file
diff --git a/test/unittest/ops/BUILD.gn b/test/unittest/ops/BUILD.gn
new file mode 100644
index 0000000..4983433
--- /dev/null
+++ b/test/unittest/ops/BUILD.gn
@@ -0,0 +1,119 @@
+# Copyright (c) 2022 Huawei Device Co., Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
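+#
+# One test source per operator builder is compiled into a single OpsUnittest binary;
+# ops_test.cpp and base_test.cpp (listed below) supply the shared test scaffolding.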
+ +import("//build/test.gni") + +module_output_path = "neural_network_runtime/" + +config("module_private_config") { + visibility = [ ":*" ] + + include_dirs = [ + "//third_party/googletest/googlemock/include", + "//foundation/ai/neural_network_runtime", + "//foundation/communication/ipc/interfaces/innerkits/ipc_core/include", + "//third_party/mindspore/mindspore/lite/mindir/include" + ] +} + +ohos_unittest("OpsUnittest") { + module_out_path = module_output_path + + sources = [ "//foundation/ai/neural_network_runtime/test/unittest/ops/add_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/argmax_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/avgpool_pad_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/avgpool_padmod_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/batch_to_space_nd_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/batchnorm_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/biasadd_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/cast_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/concat_three_inputs_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/concat_two_inputs_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/conv2d_pad_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/conv2d_padmode_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/conv2d_tranpose_padmode_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/conv2d_transpose_pad_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/depthwise_conv2d_native_pad_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/depthwise_conv2d_native_padmode_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/div_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/eltwise_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/expandims_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/fullconnection_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/fullconnection_with_axis_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/fill_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/gather_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/gelu_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/hswish_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/layernorm_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/lessequal_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/maximum_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/maxpool_pad_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/maxpool_padmode_test.cpp" ] + sources += [ 
"//foundation/ai/neural_network_runtime/test/unittest/ops/matmul_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/mul_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/onehot_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/pad_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/pow_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/prelu_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/quant_dtype_cast_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/reduce_all_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/reduce_mean_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/reduce_prod_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/relu_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/relu6_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/reshape_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/resize_bilinear_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/rsqrt_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/scale_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/shape_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/sigmoid_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/slice_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/softmax_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/spacetobatchnd_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/split_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/sqrt_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/squared_difference_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/squeeze_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/stack_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/strided_slice_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/sub_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/tanh_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/tile_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/topk_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/transpose_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/unsqueeze_builder_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/ops/ops_test.cpp" ] + sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/base_test.cpp" ] + + configs = [ ":module_private_config" ] + + 
deps = [
+    "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime",
+    "//third_party/googletest:gtest_main",
+    "//third_party/googletest:gmock_main",
+  ]
+
+  external_deps = [
+    "hitrace_native:hitrace_meter",
+    "hiviewdfx_hilog_native:libhilog",
+    "drivers_interface_nnrt:libnnrt_proxy_1.0",
+    "mindspore:mindir"
+  ]
+}
+
+group("ops_unittest") {
+  testonly = true
+  deps = [
+    ":OpsUnittest",
+  ]
+}
\ No newline at end of file
diff --git a/test/unittest/ops/add_test.cpp b/test/unittest/ops/add_test.cpp
new file mode 100644
index 0000000..50fed28
--- /dev/null
+++ b/test/unittest/ops/add_test.cpp
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "frameworks/native/ops/add_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class AddFusionBuilderTest : public OpsTest {
+public:
+    void SetUp() override;
+    void TearDown() override;
+
+    void SaveParamsTensor(const std::vector<uint32_t>& m_param, OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+
+public:
+    AddBuilder m_builder;
+    std::vector<uint32_t> m_inputs{0, 1};
+    std::vector<uint32_t> m_outputs{2};
+    std::vector<uint32_t> m_param{3};
+    std::vector<int32_t> m_input_dim{3, 3};
+    std::vector<int32_t> m_output_dim{3, 3};
+    std::vector<int32_t> m_param_dim{};
+};
+
+void AddFusionBuilderTest::SetUp() {}
+
+void AddFusionBuilderTest::TearDown() {}
+
+void AddFusionBuilderTest::SaveParamsTensor(const std::vector<uint32_t>& m_param, OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    m_paramsIndex = m_param;
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int8_t* activationValue = new (std::nothrow) int8_t(0);
+    EXPECT_NE(nullptr, activationValue);
+    tensor->SetBuffer(activationValue, sizeof(int8_t));
+    m_allTensors.emplace_back(tensor);
+}
+
+/**
+ * @tc.name: add_build_001
+ * @tc.desc: Verify the success of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(AddFusionBuilderTest, add_build_001, TestSize.Level1)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    SaveParamsTensor(m_param, OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: add_build_002
+ * @tc.desc: Verify the forbidden of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(AddFusionBuilderTest, add_build_002, TestSize.Level1)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    SaveParamsTensor(m_param, OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE);
+
+    EXPECT_EQ(OH_NN_SUCCESS,
m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_build_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_build_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_param = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_param, OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_build_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_build_004, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {}; + m_param = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_param, OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_build_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_build_005, TestSize.Level1) +{ + m_inputs = {0, 6}; + m_outputs = {2}; + m_param = {3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_param, OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_build_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_build_006, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {6}; + m_param = {3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_param, OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_build_007 + * @tc.desc: Verify the param invalid type of the build function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_build_007, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + m_paramsIndex = m_param; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + int32_t* activationValueTest = new (std::nothrow) int32_t[0]; + EXPECT_NE(nullptr, activationValueTest); + tensor->SetBuffer(activationValueTest, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_build_008 + * @tc.desc: Verify the param invalid value of the build function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_build_008, TestSize.Level1) +{ 
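+    // The activation parameter below is backed by an uninitialized heap buffer instead of a
+    // single valid OH_NN_FuseType value, so Build() is expected to return OH_NN_INVALID_PARAMETER.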
+ SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + int8_t* activationValueTest = new (std::nothrow) int8_t[40]; + EXPECT_NE(nullptr, activationValueTest); + tensor->SetBuffer(activationValueTest, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + + m_paramsIndex = m_param; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_build_009 + * @tc.desc: Verify the param invalid to add of the build function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_build_009, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS); + int8_t* activationValueTest = new (std::nothrow) int8_t[0]; + EXPECT_NE(nullptr, activationValueTest); + tensor->SetBuffer(activationValueTest, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + + m_paramsIndex = m_param; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_build_010 + * @tc.desc: Verify the param invalid to add of the build function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_build_010, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + m_allTensors.emplace_back(tensor); + + m_paramsIndex = m_param; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: add_getprimitive_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_getprimitive_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_param, OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + int8_t activationValueTest = 0; + int8_t returnValue = mindspore::lite::MindIR_AddFusion_GetActivationType(primitive.get()); + EXPECT_EQ(returnValue, activationValueTest); +} + +/** + * @tc.name: add_getprimitive_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(AddFusionBuilderTest, add_getprimitive_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_param, OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + + LiteGraphTensorPtr primitive = {nullptr, DestroyLiteGraphPrimitive}; + LiteGraphTensorPtr expectPrimitive = m_builder.GetPrimitive(); + + EXPECT_EQ(primitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace 
NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/test/unittest/ops/argmax_test.cpp b/test/unittest/ops/argmax_test.cpp
new file mode 100644
index 0000000..5a7ee65
--- /dev/null
+++ b/test/unittest/ops/argmax_test.cpp
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "frameworks/native/ops/argmax_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class ArgMaxBuilderTest : public OpsTest {
+public:
+    void SetUp();
+    void TearDown();
+
+    void SetArgmaxAxis(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+    void SetArgmaxKeepdims(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+
+public:
+    ArgMaxBuilder m_builder;
+    std::vector<uint32_t> m_inputs{0};
+    std::vector<uint32_t> m_outputs{1};
+    std::vector<uint32_t> m_params{2, 3};
+    std::vector<int32_t> m_input_dim{3, 3};
+    std::vector<int32_t> m_output_dim{3, 3};
+    std::vector<int32_t> m_param_dim{};
+};
+
+void ArgMaxBuilderTest::SetUp() {}
+
+void ArgMaxBuilderTest::TearDown() {}
+
+void ArgMaxBuilderTest::SetArgmaxAxis(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int64_t* axisValue = new (std::nothrow) int64_t(0);
+    EXPECT_NE(nullptr, axisValue);
+    tensor->SetBuffer(axisValue, sizeof(int64_t));
+    m_allTensors.emplace_back(tensor);
+}
+
+void ArgMaxBuilderTest::SetArgmaxKeepdims(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    bool* keepdimsValue = new (std::nothrow) bool(false);
+    EXPECT_NE(nullptr, keepdimsValue);
+    tensor->SetBuffer(keepdimsValue, sizeof(bool));
+    m_allTensors.emplace_back(tensor);
+}
+
+/**
+ * @tc.name: argmax_build_001
+ * @tc.desc: Verify the success of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ArgMaxBuilderTest, argmax_build_001, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
+    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+/**
+ * @tc.name: argmax_build_002
+ * @tc.desc: Verify the forbidden of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ArgMaxBuilderTest, argmax_build_002, TestSize.Level1)
+{
+    m_inputs = {0};
+    m_outputs = {1};
+    m_params = {2, 3};
+
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
+    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: argmax_build_003
+ * @tc.desc: Verify the missing input of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ArgMaxBuilderTest, argmax_build_003, TestSize.Level1)
+{
+    m_inputs = {};
+    m_outputs = {1};
+    m_params = {2, 3};
+
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
+    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: argmax_build_004
+ * @tc.desc: Verify the missing output of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ArgMaxBuilderTest, argmax_build_004, TestSize.Level1)
+{
+    m_inputs = {0};
+    m_outputs = {};
+    m_params = {1, 2};
+
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
+    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: argmax_build_005
+ * @tc.desc: Verify the inputIndex out of bounds of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ArgMaxBuilderTest, argmax_build_005, TestSize.Level1)
+{
+    m_inputs = {6};
+    m_outputs = {1};
+    m_params = {2, 3};
+
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
+    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: argmax_build_006
+ * @tc.desc: Verify the outputIndex out of bounds of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ArgMaxBuilderTest, argmax_build_006, TestSize.Level1)
+{
+    m_inputs = {0};
+    m_outputs = {6};
+    m_params = {2, 3};
+
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
+    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: argmax_build_007
+ * @tc.desc: Verify the invalid axis of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ArgMaxBuilderTest, argmax_build_007, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_FLOAT32, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
+    float* axisValueTest = new (std::nothrow) float(0);
+    EXPECT_NE(nullptr, axisValueTest);
+
+    tensor->SetBuffer(axisValueTest, sizeof(float));
+    m_allTensors.emplace_back(tensor);
+    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: argmax_build_008
+ * @tc.desc: Verify the invalid keepdims of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ArgMaxBuilderTest, argmax_build_008, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
+    int64_t* keepdimsValue = new (std::nothrow) int64_t(0);
+    EXPECT_NE(nullptr, keepdimsValue);
+
+    tensor->SetBuffer(keepdimsValue, sizeof(int64_t));
+    m_allTensors.emplace_back(tensor);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: argmax_build_009
+ * @tc.desc: Verify the invalid param to argmax of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ArgMaxBuilderTest, argmax_build_009, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
+    int64_t* strideValue = new (std::nothrow) int64_t(0);
+    EXPECT_NE(nullptr, strideValue);
+
+    tensor->SetBuffer(strideValue, sizeof(int64_t));
+    m_allTensors.emplace_back(tensor);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: argmax_build_010
+ * @tc.desc: Verify the argmax without set axis of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ArgMaxBuilderTest, argmax_build_010, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
+    m_allTensors.emplace_back(tensor);
+    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: argmax_build_011
+ * @tc.desc: Verify the argmax without set keepdims of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ArgMaxBuilderTest, argmax_build_011, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
+    m_allTensors.emplace_back(tensor);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: argmax_getprimitive_001
+ * @tc.desc: Verify the behavior of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ArgMaxBuilderTest, argmax_getprimitive_001, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
+    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
+    EXPECT_NE(expectPrimitive, primitive);
+    EXPECT_NE(nullptr, primitive);
+
+    int64_t returnValue = mindspore::lite::MindIR_ArgMaxFusion_GetAxis(primitive.get());
+    EXPECT_EQ(returnValue, 0);
+    bool keepdimsReturn = mindspore::lite::MindIR_ArgMaxFusion_GetKeepDims(primitive.get());
+    EXPECT_EQ(keepdimsReturn, false);
+}
+
+/**
+ * @tc.name: argmax_getprimitive_002
+ * @tc.desc: Verify the behavior of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ArgMaxBuilderTest, argmax_getprimitive_002, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetArgmaxAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_ARG_MAX_AXIS);
+    SetArgmaxKeepdims(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_ARG_MAX_KEEPDIMS);
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
+    EXPECT_EQ(expectPrimitive, primitive);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/test/unittest/ops/avgpool_pad_test.cpp b/test/unittest/ops/avgpool_pad_test.cpp
new file mode 100644
index 0000000..665896a
--- /dev/null
+++ b/test/unittest/ops/avgpool_pad_test.cpp
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "frameworks/native/ops/avgpool_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class AvgPoolPadBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetPad(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetPadParams(); + +public: + AvgPoolBuilder m_builder; + std::vector m_input_dim{1, 3, 3, 1}; + std::vector m_output_dim{1, 2, 2, 1}; + std::vector m_kenelsize_dim{2}; + std::vector m_stride_dim{2}; + std::vector m_pad_dim{4}; + std::vector m_param_dim{}; + std::vector m_inputs{0}; + std::vector m_outputs{1}; + std::vector m_params{2, 3, 4, 5}; +}; + +void AvgPoolPadBuilderTest::SetUp() {} + +void AvgPoolPadBuilderTest::TearDown() {} + +void AvgPoolPadBuilderTest::SetPad(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + int32_t padNum{4}; + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* padValue = new (std::nothrow) int64_t[padNum]{0, 0, 0, 0}; + EXPECT_NE(nullptr, padValue); + tensor->SetBuffer(padValue, sizeof(int64_t) * padNum); + m_allTensors.emplace_back(tensor); +} + +void AvgPoolPadBuilderTest::SetPadParams() +{ + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); +} + +/** + * @tc.name: avgpool_build_pad_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParams(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParams(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_003, TestSize.Level1) +{ + m_inputs = {}; + m_outputs = {0}; + m_params = {1, 2, 3, 4}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParams(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ 
+HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_004, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {}; + m_params = {1, 2, 3, 4}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParams(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_005, TestSize.Level1) +{ + m_inputs = {6}; + m_outputs = {1}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParams(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_006, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {6}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParams(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_007 + * @tc.desc: Verify the invalid kernelSize of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_007, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + int32_t numKernels{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_kenelsize_dim, nullptr, + OH_NN_AVG_POOL_KERNEL_SIZE); + int32_t* kernelSizeValue = new (std::nothrow) int32_t[numKernels]{1, 1}; + EXPECT_NE(nullptr, kernelSizeValue); + tensor->SetBuffer(kernelSizeValue, sizeof(int32_t) * numKernels); + m_allTensors.emplace_back(tensor); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_008 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + + int32_t numStride{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + int32_t* strideValue = new (std::nothrow) int32_t[numStride]{1, 1}; + EXPECT_NE(nullptr, strideValue); + tensor->SetBuffer(strideValue, sizeof(int32_t) * numStride); + m_allTensors.emplace_back(tensor); + + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + SetActivation(OH_NN_INT8, 
m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_009 + * @tc.desc: Verify the invalid pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + int32_t padNum{4}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + int32_t* padValue = new (std::nothrow) int32_t[padNum]{0, 0, 0, 0}; + EXPECT_NE(nullptr, padValue); + + tensor->SetBuffer(padValue, sizeof(int32_t) * padNum); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + + +/** + * @tc.name: avgpool_build_pad_010 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_AVG_POOL_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_011 + * @tc.desc: Verify the activation scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_011, TestSize.Level1) +{ + m_param_dim = {2}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + int8_t* activationValue = new (std::nothrow) int8_t[2]{1, 2}; + EXPECT_NE(nullptr, activationValue); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_AVG_POOL_ACTIVATION_TYPE); + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_012 + * @tc.desc: Verify the avgpool without set kernelsize of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_012, TestSize.Level1) +{ + m_paramsIndex = m_params; + 
SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_kenelsize_dim, nullptr, + OH_NN_AVG_POOL_KERNEL_SIZE); + m_allTensors.emplace_back(tensor); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_013 + * @tc.desc: Verify the avgpool without set stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_013, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + m_allTensors.emplace_back(tensor); + + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_014 + * @tc.desc: Verify the avgpool without set pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_014, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + m_allTensors.emplace_back(tensor); + + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_015 + * @tc.desc: Verify the avgpool without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_015, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_AVG_POOL_ACTIVATION_TYPE); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_getprimitive_pad_001 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_getprimitive_pad_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); 
+ SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParams(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + std::vector expetctKernelSize = mindspore::lite::MindIR_AvgPoolFusion_GetKernelSize(primitive.get()); + std::vector kernelSizeValueTest{1, 1}; + EXPECT_EQ(kernelSizeValueTest, expetctKernelSize); + std::vector expetctStrides = mindspore::lite::MindIR_AvgPoolFusion_GetStrides(primitive.get()); + std::vector strideValueTest{1, 1}; + std::vector expetctPadValue = mindspore::lite::MindIR_AvgPoolFusion_GetPad(primitive.get()); + std::vector padValueValueTest{0, 0, 0, 0}; + EXPECT_EQ(padValueValueTest, expetctPadValue); + + int8_t activationValue = 0; + int expectActivation = mindspore::lite::MindIR_AvgPoolFusion_GetActivationType(primitive.get()); + EXPECT_EQ(activationValue, expectActivation); +} + +/** + * @tc.name: avgpool_getprimitive_pad_002 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolPadBuilderTest, avgpool_getprimitive_pad_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParams(); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/avgpool_padmod_test.cpp b/test/unittest/ops/avgpool_padmod_test.cpp new file mode 100644 index 0000000..61fb147 --- /dev/null +++ b/test/unittest/ops/avgpool_padmod_test.cpp @@ -0,0 +1,420 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/avgpool_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class AvgPoolBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetPadMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetParams(); + +public: + AvgPoolBuilder m_builder; + std::vector m_inputs{0}; + std::vector m_outputs{1}; + std::vector m_params{2, 3, 4, 5}; + std::vector m_input_dim{1, 3, 3, 1}; + std::vector m_output_dim{1, 2, 2, 1}; + std::vector m_kenelsize_dim{2}; + std::vector m_stride_dim{2}; + std::vector m_param_dim{}; +}; + +void AvgPoolBuilderTest::SetUp() {} + +void AvgPoolBuilderTest::TearDown() {} + +void AvgPoolBuilderTest::SetPadMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* padModeValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, padModeValue); + tensor->SetBuffer(padModeValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); +} + +void AvgPoolBuilderTest::SetParams() +{ + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); +} + +/** + * @tc.name: avgpool_build_pad_mode_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParams(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParams(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_003, TestSize.Level1) +{ + m_inputs = {}; + m_outputs = {0}; + m_params = {1, 2, 3, 4}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParams(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_004, 
TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {}; + m_params = {1, 2, 3, 4}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParams(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_005, TestSize.Level1) +{ + m_inputs = {6}; + m_outputs = {1}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParams(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_006, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {6}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParams(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_007 + * @tc.desc: Verify the invalid kernelSize of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_007, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + int32_t kernelsNum{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_kenelsize_dim, nullptr, + OH_NN_AVG_POOL_KERNEL_SIZE); + int32_t* kernelSizeValue = new (std::nothrow) int32_t[kernelsNum]{1, 1}; + EXPECT_NE(nullptr, kernelSizeValue); + + tensor->SetBuffer(kernelSizeValue, sizeof(int32_t) * kernelsNum); + m_allTensors.emplace_back(tensor); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_008 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + + int32_t strideNum{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + int32_t* strideValue = new (std::nothrow) int32_t[strideNum]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, sizeof(int32_t) * strideNum); + m_allTensors.emplace_back(tensor); + + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, 
nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_009 + * @tc.desc: Verify the invalid padmode of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + + int32_t *padValueTest = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, padValueTest); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + tensor->SetBuffer(padValueTest, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + + +/** + * @tc.name: avgpool_build_pad_mode_010 + * @tc.desc: Verify the invalid activation type of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_AVG_POOL_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_011 + * @tc.desc: Verify the scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_011, TestSize.Level1) +{ + m_param_dim = {2}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + int8_t* activationValue = new (std::nothrow) int8_t[2]{1, 2}; + EXPECT_NE(nullptr, activationValue); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_AVG_POOL_ACTIVATION_TYPE); + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_012 + * @tc.desc: Verify the param invalid to avgpool of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_012, TestSize.Level1) +{ + m_paramsIndex = m_params; + 
SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_DIV_ACTIVATIONTYPE); + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_013 + * @tc.desc: Verify the invalid padmode of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_013, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + int8_t *padValueTest = new (std::nothrow) int8_t(6); + EXPECT_NE(nullptr, padValueTest); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + tensor->SetBuffer(padValueTest, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_build_pad_mode_014 + * @tc.desc: Verify the invalid activation value of the build function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_014, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE); + + int8_t* activationValue = new (std::nothrow) int8_t(6); + EXPECT_NE(nullptr, activationValue); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_AVG_POOL_ACTIVATION_TYPE); + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: avgpool_getprimitive_pad_mode_001 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_getprimitive_pad_mode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParams(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr 
expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + std::vector returnKernelSize = mindspore::lite::MindIR_AvgPoolFusion_GetKernelSize(primitive.get()); + std::vector kernelSizeValueTest{1, 1}; + EXPECT_EQ(kernelSizeValueTest, returnKernelSize); + + std::vector returnStrides = mindspore::lite::MindIR_AvgPoolFusion_GetStrides(primitive.get()); + std::vector strideValueTest{1, 1}; + int returnPadMode = mindspore::lite::MindIR_AvgPoolFusion_GetPadMode(primitive.get()); + EXPECT_EQ(1, returnPadMode); + int returnActivation = mindspore::lite::MindIR_AvgPoolFusion_GetActivationType(primitive.get()); + EXPECT_EQ(0, returnActivation); +} + +/** + * @tc.name: avgpool_getprimitive_pad_mode_002 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(AvgPoolBuilderTest, avgpool_getprimitive_pad_mode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParams(); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/batch_to_space_nd_test.cpp b/test/unittest/ops/batch_to_space_nd_test.cpp new file mode 100644 index 0000000..df5897e --- /dev/null +++ b/test/unittest/ops/batch_to_space_nd_test.cpp @@ -0,0 +1,338 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/batch_to_space_nd_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class BatchToSpaceNDBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetBlockSize(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetCrops(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +public: + BatchToSpaceNDBuilder m_builder; + std::vector m_inputs{0}; + std::vector m_outputs{1}; + std::vector m_params{2, 3}; + std::vector m_input_dim{4, 1, 1, 1}; + std::vector m_output_dim{1, 2, 2, 1}; + std::vector m_block_dim{2}; + std::vector m_crops_dim{2, 2}; +}; + +void BatchToSpaceNDBuilderTest::SetUp() {} + +void BatchToSpaceNDBuilderTest::TearDown() {} + +void BatchToSpaceNDBuilderTest::SetBlockSize(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int32_t blockNum = 2; + int64_t* blockSizeValue = new (std::nothrow) int64_t[2]{2, 2}; + EXPECT_NE(nullptr, blockSizeValue); + tensor->SetBuffer(blockSizeValue, sizeof(int64_t) * blockNum); + m_allTensors.emplace_back(tensor); +} + +void BatchToSpaceNDBuilderTest::SetCrops(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + int32_t cropsNum = 4; + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* cropsValue = new (std::nothrow) int64_t[4]{0, 0, 0, 0}; + EXPECT_NE(nullptr, cropsValue); + tensor->SetBuffer(cropsValue, sizeof(int64_t) * cropsNum); + m_allTensors.emplace_back(tensor); +} + +/** + * @tc.name: batch_to_space_nd_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, 
batch_to_space_nd_build_003, TestSize.Level1) +{ + m_params = {1, 2}; + m_paramsIndex = m_params; + m_inputs = {}; + m_outputs = {0}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_004, TestSize.Level1) +{ + m_inputs = {}; + m_outputs = {0}; + m_params = {1, 2}; + + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_005, TestSize.Level1) +{ + m_inputs = {6}; + m_outputs = {1}; + m_params = {2, 3}; + + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_006, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {6}; + m_params = {2, 3}; + + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_007 + * @tc.desc: Verify the invalid crops of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_007, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_crops_dim, nullptr, + OH_NN_BATCH_TO_SPACE_ND_CROPS); + int32_t cropsNum = 4; + int32_t* cropsValue = new (std::nothrow) int32_t[4]{0, 0, 0, 0}; + EXPECT_NE(nullptr, cropsValue); + + tensor->SetBuffer(cropsValue, sizeof(int32_t) * cropsNum); + 
m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_008 + * @tc.desc: Verify the invalid blocksize of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_block_dim, nullptr, + OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + int32_t blockNum = 2; + int32_t* blockSizeValue = new (std::nothrow) int32_t[2]{2, 2}; + EXPECT_NE(nullptr, blockSizeValue); + tensor->SetBuffer(blockSizeValue, sizeof(int32_t) * blockNum); + m_allTensors.emplace_back(tensor); + + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_009 + * @tc.desc: Verify the invalid param to batchtospace of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_block_dim, nullptr, + OH_NN_CONV2D_STRIDES); + int64_t blockNum = 2; + int64_t* blockSizeValue = new (std::nothrow) int64_t[2]{2, 2}; + EXPECT_NE(nullptr, blockSizeValue); + tensor->SetBuffer(blockSizeValue, sizeof(int64_t) * blockNum); + + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_010 + * @tc.desc: Verify the batchtospacend without set blocksize of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_block_dim, nullptr, + OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + m_allTensors.emplace_back(tensor); + + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_build_011 + * @tc.desc: Verify the batchtospacend without set crops of the build function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_build_011, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_crops_dim, nullptr, + OH_NN_BATCH_TO_SPACE_ND_CROPS); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: batch_to_space_nd_getprimitive_001 + * @tc.desc: Verify the success of the 
GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_getprimitive_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + std::vector blockSizeValue{2, 2}; + std::vector> cropsValue{{0, 0}, {0, 0}}; + std::vector returnValue = mindspore::lite::MindIR_BatchToSpaceND_GetBlockShape(primitive.get()); + EXPECT_EQ(returnValue, blockSizeValue); + std::vector> cropsReturn = mindspore::lite::MindIR_BatchToSpaceND_GetCrops(primitive.get()); + EXPECT_EQ(cropsReturn, cropsValue); +} + +/** + * @tc.name: batch_to_space_nd_getprimitive_002 + * @tc.desc: Verify the nullptr of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(BatchToSpaceNDBuilderTest, batch_to_space_nd_getprimitive_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetBlockSize(OH_NN_INT64, m_block_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE); + SetCrops(OH_NN_INT64, m_crops_dim, nullptr, OH_NN_BATCH_TO_SPACE_ND_CROPS); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/batchnorm_builder_test.cpp b/test/unittest/ops/batchnorm_builder_test.cpp new file mode 100644 index 0000000..a8eb800 --- /dev/null +++ b/test/unittest/ops/batchnorm_builder_test.cpp @@ -0,0 +1,261 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/batchnorm_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class BatchNormBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + BatchNormBuilder m_batchNorm; + std::vector m_inputs {0, 1, 2, 3, 4}; + std::vector m_outputs {5}; + std::vector m_params {6}; + std::vector m_inputDim {2, 2}; + std::vector m_outputDim {2, 2}; + std::vector m_paramDim {}; +}; + +void BatchNormBuilderTest::SetUp() {} + +void BatchNormBuilderTest::TearDown() {} + +void BatchNormBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr epsilonTensor = TransToNNTensor(dataType, dim, quantParam, type); + float *epsilonValue = new (std::nothrow) float(0.0f); + EXPECT_NE(nullptr, epsilonValue); + epsilonTensor->SetBuffer(epsilonValue, sizeof(float)); + m_allTensors.emplace_back(epsilonTensor); +} + +/** + * @tc.name: batchnorm_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_BATCH_NORM_EPSILON); + + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: batchnorm_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_BATCH_NORM_EPSILON); + + EXPECT_EQ(OH_NN_SUCCESS, m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: batchnorm_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2, 3, 4, 5}; + m_outputs = {6}; + m_params = {7}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_BATCH_NORM_EPSILON); + + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: batchnorm_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
+ * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_004, TestSize.Level0) +{ + m_outputs = {5, 6}; + m_params = {7}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_BATCH_NORM_EPSILON); + + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: batchnorm_build_005 + * @tc.desc: Verify that the build function returns a failed message with null allTensor. + * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: batchnorm_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: batchnorm_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid epsilon's dataType. + * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr epsilonTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, + nullptr, OH_NN_BATCH_NORM_EPSILON); + float epsilonValue = 0.0f; + epsilonTensor->SetBuffer(&epsilonValue, sizeof(epsilonValue)); + m_allTensors.emplace_back(epsilonTensor); + + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + epsilonTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: batchnorm_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid epsilon's dimension. + * @tc.type: FUNC + */ +HWTEST_F(BatchNormBuilderTest, batchnorm_build_008, TestSize.Level0) +{ + std::vector m_paramDim = { 2 }; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr epsilonTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_BATCH_NORM_EPSILON); + float epsilonValue[2] = {0.0f, 0.0f}; + epsilonTensor->SetBuffer(epsilonValue, 2 * sizeof(float)); + m_allTensors.emplace_back(epsilonTensor); + + OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + epsilonTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: batchnorm_build_009 + * @tc.desc: Verify that the build function returns a failed message with invalid param. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(BatchNormBuilderTest, batchnorm_build_009, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+    SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A);
+
+    OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: batchnorm_build_010
+ * @tc.desc: Verify that the build function returns a failed message when the epsilon buffer is not set.
+ * @tc.type: FUNC
+ */
+HWTEST_F(BatchNormBuilderTest, batchnorm_build_010, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+    std::shared_ptr<NNTensor> epsilonTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr,
+        OH_NN_BATCH_NORM_EPSILON);
+    m_allTensors.emplace_back(epsilonTensor);
+
+    OH_NN_ReturnCode ret = m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: batchnorm_getprimitive_001
+ * @tc.desc: Verify that the getPrimitive function returns a successful message
+ * @tc.type: FUNC
+ */
+HWTEST_F(BatchNormBuilderTest, batchnorm_getprimitive_001, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+    SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_BATCH_NORM_EPSILON);
+
+    float epsilonValue = 0.0f;
+    EXPECT_EQ(OH_NN_SUCCESS, m_batchNorm.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors));
+    LiteGraphPrimitvePtr primitive = m_batchNorm.GetPrimitive();
+    LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive);
+    EXPECT_NE(expectPrimitive, primitive);
+
+    auto returnValue = mindspore::lite::MindIR_FusedBatchNorm_GetEpsilon(primitive.get());
+    EXPECT_EQ(returnValue, epsilonValue);
+}
+
+/**
+ * @tc.name: batchnorm_getprimitive_002
+ * @tc.desc: Verify that the getPrimitive function returns a failed message without build.
+ * @tc.type: FUNC
+ */
+HWTEST_F(BatchNormBuilderTest, batchnorm_getprimitive_002, TestSize.Level0)
+{
+    BatchNormBuilder batchNorm;
+    LiteGraphPrimitvePtr primitive = batchNorm.GetPrimitive();
+    LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive);
+    EXPECT_EQ(expectPrimitive, primitive);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/test/unittest/ops/biasadd_test.cpp b/test/unittest/ops/biasadd_test.cpp
new file mode 100644
index 0000000..0c848c3
--- /dev/null
+++ b/test/unittest/ops/biasadd_test.cpp
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "frameworks/native/ops/bias_add_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class BiasAddBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetBiasAddToallTensors(); +public: + BiasAddBuilder m_builder; + std::vector m_inputs{0, 1}; + std::vector m_outputs{2}; + std::vector m_params{}; + std::vector m_output_dim{2, 3}; +}; + +void BiasAddBuilderTest::SetUp() {} + +void BiasAddBuilderTest::TearDown() {} + +void BiasAddBuilderTest::SetBiasAddToallTensors() +{ + std::vector m_input_dim{2, 3}; + std::vector biasDim{3}; + std::shared_ptr inputTensor; + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + inputTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); +} + +/** + * @tc.name: biasadd_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_build_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetBiasAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: biasadd_build_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_build_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetBiasAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: biasadd_build_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_build_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetBiasAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: biasadd_build_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_build_004, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {}; + m_params = {}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetBiasAddToallTensors(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: biasadd_build_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_build_005, TestSize.Level1) +{ + m_inputs = {0, 6}; + m_outputs = {2}; + m_params = {}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetBiasAddToallTensors(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + 
EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: biasadd_build_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_build_006, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {6}; + m_params = {}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetBiasAddToallTensors(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: biasadd_build_007 + * @tc.desc: Verify the paramIndex not empty of the build function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_build_007, TestSize.Level1) +{ + m_params = {1}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetBiasAddToallTensors(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: biasadd_getprimitive_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_getprimitive_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetBiasAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: biasadd_getprimitive_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(BiasAddBuilderTest, biasadd_getprimitive_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetBiasAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/cast_test.cpp b/test/unittest/ops/cast_test.cpp new file mode 100644 index 0000000..fb52b4b --- /dev/null +++ b/test/unittest/ops/cast_test.cpp @@ -0,0 +1,260 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/cast_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class CastBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetCastAddToallTensors(); + +public: + CastBuilder m_builder; + std::vector m_inputs{0, 1}; + std::vector m_outputs{2}; + std::vector m_params{}; + std::vector m_output_dim{1, 2, 2, 1}; +}; + +void CastBuilderTest::SetUp() {} + +void CastBuilderTest::TearDown() {} + +void CastBuilderTest::SetCastAddToallTensors() +{ + std::vector m_input_dim{1, 2, 2, 1}; + std::vector typeDim = {}; + std::shared_ptr inputTensor; + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + + inputTensor = TransToNNTensor(OH_NN_INT32, typeDim, nullptr, OH_NN_TENSOR); + int32_t* typeValue = new (std::nothrow) int32_t(4); + EXPECT_NE(nullptr, typeValue); + inputTensor->SetBuffer(typeValue, sizeof(int32_t)); + m_allTensors.emplace_back(inputTensor); +} + +/** + * @tc.name: cast_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(CastBuilderTest, cast_build_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetCastAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: cast_build_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(CastBuilderTest, cast_build_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetCastAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: cast_build_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(CastBuilderTest, cast_build_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetCastAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: cast_build_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(CastBuilderTest, cast_build_004, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetCastAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: cast_build_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(CastBuilderTest, cast_build_005, TestSize.Level1) +{ + m_inputs = {0, 6}; + m_outputs = {2}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetCastAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, 
m_output_dim, nullptr);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: cast_build_006
+ * @tc.desc: Verify the outputIndex out of bounds of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(CastBuilderTest, cast_build_006, TestSize.Level1)
+{
+    m_inputs = {0, 1};
+    m_outputs = {6};
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+
+    SetCastAddToallTensors();
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: cast_build_007
+ * @tc.desc: Verify the paramIndex not empty of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(CastBuilderTest, cast_build_007, TestSize.Level1)
+{
+    m_params = {1};
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+
+    SetCastAddToallTensors();
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: cast_build_008
+ * @tc.desc: Verify the invalid cast type value of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(CastBuilderTest, cast_build_008, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+
+    std::vector<int32_t> m_input_dim{1, 2, 2, 1};
+    std::vector<int32_t> typeDim = {};
+    std::shared_ptr<NNTensor> inputTensor;
+    inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR);
+    m_allTensors.emplace_back(inputTensor);
+
+    inputTensor = TransToNNTensor(OH_NN_INT32, typeDim, nullptr, OH_NN_TENSOR);
+    int32_t* typeValue = new (std::nothrow) int32_t(40);
+    EXPECT_NE(nullptr, typeValue);
+
+    inputTensor->SetBuffer(typeValue, sizeof(int32_t));
+    m_allTensors.emplace_back(inputTensor);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: cast_build_009
+ * @tc.desc: Verify the cast without set types of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(CastBuilderTest, cast_build_009, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+
+    std::vector<int32_t> m_input_dim{1, 2, 2, 1};
+    std::vector<int32_t> typeDim = {};
+    std::shared_ptr<NNTensor> inputTensor;
+    inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR);
+    m_allTensors.emplace_back(inputTensor);
+
+    inputTensor = TransToNNTensor(OH_NN_INT32, typeDim, nullptr, OH_NN_TENSOR);
+    m_allTensors.emplace_back(inputTensor);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: cast_getprimitive_001
+ * @tc.desc: Verify the success of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(CastBuilderTest, cast_getprimitive_001, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+    SetCastAddToallTensors();
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
+    EXPECT_NE(expectPrimitive, primitive);
+}
+
+/**
+ * @tc.name: cast_getprimitive_002
+ * @tc.desc: Verify the
behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(CastBuilderTest, cast_getprimitive_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetCastAddToallTensors(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/concat_three_inputs_test.cpp b/test/unittest/ops/concat_three_inputs_test.cpp new file mode 100644 index 0000000..093b1c5 --- /dev/null +++ b/test/unittest/ops/concat_three_inputs_test.cpp @@ -0,0 +1,244 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/concat_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ConcatBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +public: + ConcatBuilder m_builder; + std::vector m_inputs{0, 1, 2}; + std::vector m_outputs{3}; + std::vector m_params{4}; + std::vector m_input_dim{3, 3}; + std::vector m_output_dim{3, 3}; + std::vector m_param_dim{}; +}; + +void ConcatBuilderTest::SetUp() {} + +void ConcatBuilderTest::TearDown() {} + +void ConcatBuilderTest::SetAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t(0); + EXPECT_NE(nullptr, axisValue); + tensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(tensor); +} + +/** + * @tc.name: concat_build_three_input_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_build_three_input_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_three_input_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_build_three_input_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); 
+ + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_three_input_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_build_three_input_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_three_input_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_build_three_input_004, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {}; + m_params = {3}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_three_input_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_build_three_input_005, TestSize.Level1) +{ + m_outputs = {3}; + m_params = {4}; + m_inputs = {0, 1, 6}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_three_input_007 + * @tc.desc: Verify the invalid axis of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_build_three_input_007, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + m_paramsIndex = m_params; + int32_t* axisValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, axisValue); + + tensor->SetBuffer(axisValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_three_input_008 + * @tc.desc: Verify the scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_build_three_input_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + m_param_dim = {2}; + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + int64_t* axisValue = new (std::nothrow) int64_t[2]{0, 0}; + EXPECT_NE(nullptr, axisValue); + + 
tensor->SetBuffer(axisValue, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_three_input_009 + * @tc.desc: Verify the invalid param to concat of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_build_three_input_009, TestSize.Level1) +{ + m_param_dim = {2}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_STRIDES); + int64_t* axisValue = new (std::nothrow) int64_t[2]{0, 0}; + EXPECT_NE(nullptr, axisValue); + + tensor->SetBuffer(axisValue, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_getprimitive_three_input_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_getprimitive_three_input_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + int64_t expectValue = mindspore::lite::MindIR_Concat_GetAxis(primitive.get()); + EXPECT_EQ(expectValue, 0); +} + +/** + * @tc.name: concat_getprimitive_three_input_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(ConcatBuilderTest, concat_getprimitive_three_input_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr returnPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(returnPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/concat_two_inputs_test.cpp b/test/unittest/ops/concat_two_inputs_test.cpp new file mode 100644 index 0000000..8fa81b0 --- /dev/null +++ b/test/unittest/ops/concat_two_inputs_test.cpp @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/concat_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ConcatTwoInputBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +public: + ConcatBuilder m_builder; + std::vector m_inputs{0, 1}; + std::vector m_outputs{2}; + std::vector m_params{3}; + std::vector m_input_dim{3, 3}; + std::vector m_output_dim{3, 3}; + std::vector m_param_dim{}; +}; + +void ConcatTwoInputBuilderTest::SetUp() {} + +void ConcatTwoInputBuilderTest::TearDown() {} + +void ConcatTwoInputBuilderTest::SetAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t(0); + EXPECT_NE(nullptr, axisValue); + + tensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(tensor); +} + +/** + * @tc.name: concat_build_two_input_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_two_input_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_two_input_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {2}; + m_params = {3}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: concat_build_two_input_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_004, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {}; + m_params = {3}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetAxis(OH_NN_INT64, 
m_param_dim, nullptr, OH_NN_CONCAT_AXIS);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: concat_build_two_input_005
+ * @tc.desc: Verify the inputIndex out of bounds of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_005, TestSize.Level1)
+{
+    m_inputs = {0, 1, 6};
+    m_outputs = {3};
+    m_params = {4};
+    m_paramsIndex = m_params;
+
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: concat_build_two_input_006
+ * @tc.desc: Verify the outputIndex out of bounds of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_006, TestSize.Level1)
+{
+    m_inputs = {0, 1};
+    m_outputs = {6};
+    m_params = {3};
+    m_paramsIndex = m_params;
+
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: concat_build_two_input_007
+ * @tc.desc: Verify the invalid axis of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_007, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_CONCAT_AXIS);
+    int32_t* axisValue = new (std::nothrow) int32_t(0);
+    EXPECT_NE(nullptr, axisValue);
+
+    tensor->SetBuffer(axisValue, sizeof(int32_t));
+    m_allTensors.emplace_back(tensor);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: concat_build_two_input_008
+ * @tc.desc: Verify the scalar length of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_008, TestSize.Level1)
+{
+    m_param_dim = {2};
+    m_paramsIndex = m_params;
+
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS);
+    int64_t* axisValue = new (std::nothrow) int64_t[2]{0, 0};
+    EXPECT_NE(nullptr, axisValue);
+
+    tensor->SetBuffer(axisValue, 2 * sizeof(int64_t));
+    m_allTensors.emplace_back(tensor);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: concat_build_two_input_009
+ * @tc.desc: This is an OH_NN_INVALID_PARAMETER case because the value of axis is nullptr.
+ * @tc.type: FUNC
+ */
+HWTEST_F(ConcatTwoInputBuilderTest, concat_build_two_input_009, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS);
+    m_allTensors.emplace_back(tensor);
+
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: concat_getprimitive_two_input_001
+ * @tc.desc: Verify the success of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ConcatTwoInputBuilderTest, concat_getprimitive_two_input_001, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS);
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
+    EXPECT_NE(expectPrimitive, primitive);
+
+    int64_t returnValue = mindspore::lite::MindIR_Concat_GetAxis(primitive.get());
+    EXPECT_EQ(returnValue, 0);
+}
+
+/**
+ * @tc.name: concat_getprimitive_two_input_002
+ * @tc.desc: Verify the nullptr return of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ConcatTwoInputBuilderTest, concat_getprimitive_two_input_002, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONCAT_AXIS);
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
+    EXPECT_EQ(expectPrimitive, primitive);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/test/unittest/ops/conv2d_pad_test.cpp b/test/unittest/ops/conv2d_pad_test.cpp
new file mode 100644
index 0000000..c19c402
--- /dev/null
+++ b/test/unittest/ops/conv2d_pad_test.cpp
@@ -0,0 +1,561 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "frameworks/native/ops/conv2d_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class Conv2DBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetConv2dInput(); + void SetPad(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetPadParam(); + +public: + Conv2DBuilder m_builder; + std::vector m_inputs{0, 1, 2}; + std::vector m_outputs{3}; + std::vector m_params{4, 5, 6, 7, 8}; + std::vector m_output_dim{1, 3, 3, 1}; + std::vector m_stride_dim{2}; + std::vector m_dilation_dim{2}; + std::vector m_pad_dim{4}; + std::vector m_param_dim{}; +}; + +void Conv2DBuilderTest::SetUp() {} + +void Conv2DBuilderTest::TearDown() {} + +void Conv2DBuilderTest::SetPad(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + int32_t padNum = 4; + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* padValue = new (std::nothrow) int64_t[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, padValue); + tensor->SetBuffer(padValue, padNum * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); +} + +void Conv2DBuilderTest::SetPadParam() +{ + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); +} + +void Conv2DBuilderTest::SetConv2dInput() +{ + int32_t weightNum = 4; + std::vector m_input_dim{1, 4, 4, 1}; + std::vector weightDim = {1, 2, 2, 1}; + std::vector biasDim = {1}; + std::shared_ptr inputsTensor; + inputsTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputsTensor); + + inputsTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR); + float* weightValue = new (std::nothrow) float[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, weightValue); + + inputsTensor->SetBuffer(weightValue, weightNum * sizeof(weightValue)); + m_allTensors.emplace_back(inputsTensor); + inputsTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* biasValue = new (std::nothrow) float[1]{0}; + EXPECT_NE(nullptr, biasValue); + + inputsTensor->SetBuffer(biasValue, sizeof(float)); + m_allTensors.emplace_back(inputsTensor); +} + +/** + * @tc.name: conv2d_build_pad_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + EXPECT_EQ(OH_NN_SUCCESS, 
m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2, 3, 4, 5, 6}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_004, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {}; + m_params = {3, 4, 5, 6, 7}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} +/** + * @tc.name: conv2d_build_pad_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_005, TestSize.Level1) +{ + m_inputs = {0, 1, 9}; + m_outputs = {3}; + m_params = {4, 5, 6, 7, 8}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_006, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {9}; + m_params = {4, 5, 6, 7, 8}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_007 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_007, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + int32_t* strideValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_008 + * @tc.desc: Verify the invalid dilation of 
the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + int32_t* dilationValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, dilationValue); + + tensor->SetBuffer(dilationValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_009 + * @tc.desc: Verify the invalid pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetConv2dInput(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + + int32_t* padValue = new (std::nothrow) int32_t[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, padValue); + tensor->SetBuffer(padValue, 4 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_010 + * @tc.desc: Verify the invalid group of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + + int32_t* groupValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, groupValue); + tensor->SetBuffer(groupValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + + +/** + * @tc.name: conv2d_build_pad_011 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_011, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + + SetPad(OH_NN_INT64, 
m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_012 + * @tc.desc: Verify the group scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_012, TestSize.Level1) +{ + std::vector groupDim = {2}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, groupDim, nullptr, OH_NN_CONV2D_GROUP); + int64_t* groupValue = new (std::nothrow) int64_t[2]{0, 0}; + EXPECT_NE(nullptr, groupValue); + + tensor->SetBuffer(groupValue, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_013 + * @tc.desc: Verify the scalar activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_013, TestSize.Level1) +{ + std::vector activationDim = {2}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, activationDim, nullptr, + OH_NN_CONV2D_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationValue); + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_014 + * @tc.desc: Verify the conv2d without set stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_014, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetConv2dInput(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_stride_dim, nullptr, + OH_NN_CONV2D_STRIDES); + m_allTensors.emplace_back(tensor); + + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, 
m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_015 + * @tc.desc: Verify the conv2d without set dilation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_015, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_dilation_dim, nullptr, + OH_NN_CONV2D_DILATION); + m_allTensors.emplace_back(tensor); + + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_016 + * @tc.desc: Verify the conv2d without set pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_016, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, + OH_NN_CONV2D_PAD); + m_allTensors.emplace_back(tensor); + + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_017 + * @tc.desc: Verify the conv2d without set group of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_017, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, + OH_NN_CONV2D_GROUP); + m_allTensors.emplace_back(tensor); + + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_pad_018 + * @tc.desc: Verify the conv2d without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderTest, conv2d_build_pad_018, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_CONV2D_ACTIVATION_TYPE); + m_allTensors.emplace_back(tensor); + + 
EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: conv2d_getprimitive_pad_001
+ * @tc.desc: Verify the success of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(Conv2DBuilderTest, conv2d_getprimitive_pad_001, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+
+    SetConv2dInput();
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    SetPadParam();
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive);
+    EXPECT_NE(expectPrimitive, primitive);
+
+    std::vector<int64_t> returnStrides = mindspore::lite::MindIR_Conv2DFusion_GetStride(primitive.get());
+    std::vector<int64_t> strideValueTest{1, 1};
+    EXPECT_EQ(strideValueTest, returnStrides);
+
+    std::vector<int64_t> returnDilation = mindspore::lite::MindIR_Conv2DFusion_GetDilation(primitive.get());
+    std::vector<int64_t> dilationValueTest{1, 1};
+    EXPECT_EQ(dilationValueTest, returnDilation);
+
+    std::vector<int64_t> returnPaddings = mindspore::lite::MindIR_Conv2DFusion_GetPadList(primitive.get());
+    std::vector<int64_t> padValueTest{1, 1, 1, 1};
+    EXPECT_EQ(padValueTest, returnPaddings);
+
+    int returnGroup = mindspore::lite::MindIR_Conv2DFusion_GetGroup(primitive.get());
+    EXPECT_EQ(0, returnGroup);
+    int returnActivation = mindspore::lite::MindIR_Conv2DFusion_GetActivationType(primitive.get());
+    EXPECT_EQ(0, returnActivation);
+}
+
+/**
+ * @tc.name: conv2d_getprimitive_pad_002
+ * @tc.desc: Verify the nullptr return of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(Conv2DBuilderTest, conv2d_getprimitive_pad_002, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+
+    SetConv2dInput();
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    SetPadParam();
+
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive);
+    EXPECT_EQ(expectPrimitive, primitive);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/test/unittest/ops/conv2d_padmode_test.cpp b/test/unittest/ops/conv2d_padmode_test.cpp
new file mode 100644
index 0000000..4250d44
--- /dev/null
+++ b/test/unittest/ops/conv2d_padmode_test.cpp
@@ -0,0 +1,591 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "frameworks/native/ops/conv2d_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class Conv2DBuilderPadmodeTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetConv2dInput(); + void SetPadMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetParam(); + +public: + Conv2DBuilder m_builder; + std::vector m_inputs{0, 1, 2}; + std::vector m_outputs{3}; + std::vector m_params{4, 5, 6, 7, 8}; + std::vector m_output_dim{1, 3, 3, 1}; + std::vector m_stride_dim{2}; + std::vector m_dilation_dim{2}; + std::vector m_param_dim{}; +}; + +void Conv2DBuilderPadmodeTest::SetUp() {} + +void Conv2DBuilderPadmodeTest::TearDown() {} + +void Conv2DBuilderPadmodeTest::SetPadMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* padModeValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, padModeValue); + + tensor->SetBuffer(padModeValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); +} + +void Conv2DBuilderPadmodeTest::SetParam() +{ + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); +} + +void Conv2DBuilderPadmodeTest::SetConv2dInput() +{ + int32_t weightNum = 4; + std::vector m_input_dim{1, 4, 4, 1}; + std::vector weightDim = {1, 2, 2, 1}; + std::vector biasDim = {1}; + + std::shared_ptr inputTensor; + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + + inputTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR); + float* weightValue = new (std::nothrow) float[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, weightValue); + inputTensor->SetBuffer(weightValue, weightNum * sizeof(weightValue)); + m_allTensors.emplace_back(inputTensor); + + inputTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* biasValue = new (std::nothrow) float[1]{0}; + EXPECT_NE(nullptr, biasValue); + + inputTensor->SetBuffer(biasValue, sizeof(float)); + m_allTensors.emplace_back(inputTensor); +} + + +/** + * @tc.name: conv2d_build_padmode_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + + EXPECT_EQ(OH_NN_SUCCESS, 
m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2, 3, 4, 5, 6}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_004, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {}; + m_params = {3, 4, 5, 6, 7}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} +/** + * @tc.name: conv2d_build_padmode_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_005, TestSize.Level1) +{ + m_inputs = {0, 1, 9}; + m_outputs = {3}; + m_params = {4, 5, 6, 7, 8}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_006, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {9}; + m_params = {4, 5, 6, 7, 8}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_007 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_007, TestSize.Level1) +{ + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + int32_t* strideValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * 
@tc.name: conv2d_build_padmode_008 + * @tc.desc: Verify the invalid dilation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_008, TestSize.Level1) +{ + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + int32_t* dilationValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, dilationValue); + + tensor->SetBuffer(dilationValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_009 + * @tc.desc: Verify the invalid padMode of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + int32_t* padModeValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, padModeValue); + tensor->SetBuffer(padModeValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_010 + * @tc.desc: Verify the invalid group of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + int32_t* groupValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, groupValue); + + tensor->SetBuffer(groupValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + + +/** + * @tc.name: conv2d_build_padmode_011 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_011, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); 
+ SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + tensor->SetBuffer(activationValue, sizeof(int32_t)); + + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_012 + * @tc.desc: Verify the group scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_012, TestSize.Level1) +{ + std::vector groupDim = {2}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, groupDim, nullptr, OH_NN_CONV2D_GROUP); + int64_t* groupValue = new (std::nothrow) int64_t[2]{0, 0}; + EXPECT_NE(nullptr, groupValue); + tensor->SetBuffer(groupValue, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_013 + * @tc.desc: Verify the activation scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_013, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP); + + std::vector activationDim = {2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, activationDim, nullptr, + OH_NN_CONV2D_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_014 + * @tc.desc: Verify the param invalid to conv2d of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_014, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + std::vector activationDim = {2}; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE); + 
SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP);
+
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, activationDim, nullptr,
+        OH_NN_DIV_ACTIVATIONTYPE);
+    int8_t* activationValue = new (std::nothrow) int8_t(0);
+    EXPECT_NE(nullptr, activationValue);
+    tensor->SetBuffer(activationValue, sizeof(int8_t));
+    m_allTensors.emplace_back(tensor);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: conv2d_build_padmode_015
+ * @tc.desc: Verify the invalid padmode value of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_015, TestSize.Level1)
+{
+    std::vector<int32_t> activationDim = {2};
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+
+    SetConv2dInput();
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES);
+    SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION);
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE);
+    int8_t* padModeValue = new (std::nothrow) int8_t(10);
+    EXPECT_NE(nullptr, padModeValue);
+
+    tensor->SetBuffer(padModeValue, sizeof(int8_t));
+    m_allTensors.emplace_back(tensor);
+    SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_GROUP);
+    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_ACTIVATION_TYPE);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: conv2d_build_padmode_016
+ * @tc.desc: Verify the invalid weight dims of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_016, TestSize.Level1)
+{
+    std::vector<int32_t> activationDim = {2};
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+    int32_t weightNum = 3;
+    std::vector<int32_t> m_input_dim{1, 4, 4, 1};
+    std::vector<int32_t> weightDim = {1, 3, 1};
+    std::vector<int32_t> biasDim = {1};
+
+    std::shared_ptr<NNTensor> inputTensor;
+    inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR);
+    m_allTensors.emplace_back(inputTensor);
+    inputTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR);
+    float* weightValue = new (std::nothrow) float[3]{1, 1, 1};
+    EXPECT_NE(nullptr, weightValue);
+
+    inputTensor->SetBuffer(weightValue, weightNum * sizeof(float));
+    m_allTensors.emplace_back(inputTensor);
+    inputTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR);
+    float* biasValue = new (std::nothrow) float[1]{0};
+    EXPECT_NE(nullptr, biasValue);
+
+    inputTensor->SetBuffer(biasValue, sizeof(float));
+    m_allTensors.emplace_back(inputTensor);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: conv2d_build_padmode_017
+ * @tc.desc: Verify the activation value of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_017, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+
+    SetConv2dInput();
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES);
+    SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION);
+    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_PAD_MODE);
+    SetGroup(OH_NN_INT64, m_param_dim, nullptr,
OH_NN_CONV2D_GROUP); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_CONV2D_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t(10); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_build_padmode_018 + * @tc.desc: Verify the activation value of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_build_padmode_018, TestSize.Level1) +{ + std::vector m_pad_dim = {3}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_DILATION); + + int32_t padNum = 3; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_PAD); + int64_t* padValue = new (std::nothrow) int64_t[3]{1, 1, 1}; + EXPECT_NE(nullptr, padValue); + + tensor->SetBuffer(padValue, padNum * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2d_getprimitive_padmode_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_getprimitive_padmode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + std::vector expectStrides = mindspore::lite::MindIR_Conv2DFusion_GetStride(primitive.get()); + std::vector strideValueTest{1, 1}; + EXPECT_EQ(strideValueTest, expectStrides); + + std::vector expectDliation = mindspore::lite::MindIR_Conv2DFusion_GetDilation(primitive.get()); + std::vector dilationValueTest{1, 1}; + EXPECT_EQ(dilationValueTest, expectDliation); + int expectpadMode = mindspore::lite::MindIR_Conv2DFusion_GetPadMode(primitive.get()); + EXPECT_EQ(1, expectpadMode); + + int expectGroup = mindspore::lite::MindIR_Conv2DFusion_GetGroup(primitive.get()); + EXPECT_EQ(0, expectGroup); + + int expectActivation = mindspore::lite::MindIR_Conv2DFusion_GetActivationType(primitive.get()); + EXPECT_EQ(0, expectActivation); +} + +/** + * @tc.name: conv2d_getprimitive_padmode_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DBuilderPadmodeTest, conv2d_getprimitive_padmode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git 
a/test/unittest/ops/conv2d_tranpose_padmode_test.cpp b/test/unittest/ops/conv2d_tranpose_padmode_test.cpp new file mode 100644 index 0000000..eea61e9 --- /dev/null +++ b/test/unittest/ops/conv2d_tranpose_padmode_test.cpp @@ -0,0 +1,790 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/conv2d_transpose_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class Conv2DTransposePadmodeBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetConv2dTransposeInput(); + void SetPadMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetOutPaddings(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetParam(); + +public: + Conv2DTransposeBuilder m_builder; + std::vector m_inputs{0, 1, 2}; + std::vector m_outputs{3}; + std::vector m_params{4, 5, 6, 7, 8, 9}; + std::vector m_output_dim{1, 3, 3, 1}; + std::vector m_stride_dim{2}; + std::vector m_dilation_dim{2}; + std::vector outPaddingsDim{2}; + std::vector m_param_dim{}; +}; + +void Conv2DTransposePadmodeBuilderTest::SetUp() {} + +void Conv2DTransposePadmodeBuilderTest::TearDown() {} + +void Conv2DTransposePadmodeBuilderTest::SetPadMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* padModeValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, padModeValue); + tensor->SetBuffer(padModeValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); +} + +void Conv2DTransposePadmodeBuilderTest::SetOutPaddings(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + int32_t outPaddingsNum = 2; + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* outPaddingsValue = new (std::nothrow) int64_t[2]{0, 0}; + EXPECT_NE(nullptr, outPaddingsValue); + tensor->SetBuffer(outPaddingsValue, outPaddingsNum * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); +} + +void Conv2DTransposePadmodeBuilderTest::SetParam() +{ + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); +} + +void 
Conv2DTransposePadmodeBuilderTest::SetConv2dTransposeInput() +{ + int32_t weightNum = 4; + std::vector m_input_dim{1, 4, 4, 1}; + std::vector weightDim = {1, 2, 2, 1}; + std::vector biasDim = {1}; + std::shared_ptr inTensor; + inTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inTensor); + inTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR); + float* weightValue = new (std::nothrow) float[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, weightValue); + inTensor->SetBuffer(weightValue, weightNum * sizeof(weightValue)); + m_allTensors.emplace_back(inTensor); + inTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* biasValue = new (std::nothrow) float[1]{0}; + EXPECT_NE(nullptr, biasValue); + inTensor->SetBuffer(biasValue, sizeof(float)); + m_allTensors.emplace_back(inTensor); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2, 3, 4, 5, 6, 7}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_004, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {}; + m_params = {3, 4, 5, 6, 7, 8}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_005, TestSize.Level1) +{ + m_inputs = {0, 1, 10}; + m_outputs = {3}; + m_params = {4, 5, 6, 7, 8, 9}; + 
m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_006, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {10}; + m_params = {4, 5, 6, 7, 8, 9}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_007 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_007, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_STRIDES); + int32_t* strideValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_008 + * @tc.desc: Verify the invalid dilation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_dilation_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_DILATION); + int32_t* dilationValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, dilationValue); + + tensor->SetBuffer(dilationValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_009 + * @tc.desc: Verify the invalid padmode of the build function + * @tc.type: FUNC + */ 
+HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + int32_t* padModeValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, padModeValue); + + tensor->SetBuffer(padModeValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_010 + * @tc.desc: Verify the invalid outpaddings of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + + std::shared_ptr outPadtensor = TransToNNTensor(OH_NN_INT32, outPaddingsDim, nullptr, + OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + int32_t* outPaddingsTypeInvalid = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, outPaddingsTypeInvalid); + + outPadtensor->SetBuffer(outPaddingsTypeInvalid, sizeof(int32_t)); + m_allTensors.emplace_back(outPadtensor); + + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_011 + * @tc.desc: Verify the invalid group of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_011, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_GROUP); + int32_t* groupValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, groupValue); + + tensor->SetBuffer(groupValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_012 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_012, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + int32_t* activationTest = new (std::nothrow) int32_t(1); + EXPECT_NE(nullptr, activationTest); + + tensor->SetBuffer(activationTest, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_013 + * @tc.desc: Verify the group scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_013, TestSize.Level1) +{ + std::vector groupDim = {2}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, outPaddingsDim, nullptr, + OH_NN_CONV2D_TRANSPOSE_GROUP); + int64_t* groupTest = new (std::nothrow) int64_t[2]{0, 0}; + EXPECT_NE(nullptr, groupTest); + + tensor->SetBuffer(groupTest, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_014 + * @tc.desc: Verify the activation scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_014, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, outPaddingsDim, nullptr, + 
OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + int8_t* activationTest = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationTest); + + tensor->SetBuffer(activationTest, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_015 + * @tc.desc: Verify the invalid weight dims of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_015, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + int32_t weightNum = 3; + std::vector m_input_dim{1, 4, 4, 1}; + std::vector weightDim = {1, 3, 1}; + std::vector biasDim = {1}; + + std::shared_ptr inTensor; + inTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inTensor); + inTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR); + float* weightValue = new (std::nothrow) float[3]{1, 1, 1}; + EXPECT_NE(nullptr, weightValue); + + inTensor->SetBuffer(weightValue, weightNum * sizeof(weightValue)); + m_allTensors.emplace_back(inTensor); + inTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* biasValue = new (std::nothrow) float[1]{0}; + EXPECT_NE(nullptr, biasValue); + + inTensor->SetBuffer(biasValue, sizeof(float)); + m_allTensors.emplace_back(inTensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_016 + * @tc.desc: Verify the invalid param to conv2d transpose length of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_016, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_CONV2D_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_017 + * @tc.desc: Verify the activation value of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_017, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, 
OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t(10); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_018 + * @tc.desc: Verify the padmode value of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_018, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + int8_t* padModeValue = new (std::nothrow) int8_t(10); + EXPECT_NE(nullptr, padModeValue); + + tensor->SetBuffer(padModeValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_019 + * @tc.desc: Verify the pad dim invalid of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_019, TestSize.Level1) +{ + std::vector m_pad_dim = {3}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + int64_t* padValue = new (std::nothrow) int64_t[3]{1, 1, 1}; + EXPECT_NE(nullptr, padValue); + + tensor->SetBuffer(padValue, 3 * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_020 + * @tc.desc: Verify the conv2dtranspose without set stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_020, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_stride_dim, nullptr, + 
OH_NN_CONV2D_TRANSPOSE_STRIDES); + m_allTensors.emplace_back(tensor); + + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_021 + * @tc.desc: Verify the conv2dtranspose without set dilation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_021, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_dilation_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_DILATION); + m_allTensors.emplace_back(tensor); + + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_022 + * @tc.desc: Verify the conv2dtranspose without set pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_022, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + m_allTensors.emplace_back(tensor); + + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_023 + * @tc.desc: Verify the conv2dtranspose without set outpaddings of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_023, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + + std::shared_ptr tensor = 
TransToNNTensor(OH_NN_INT64, outPaddingsDim, nullptr, + OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + m_allTensors.emplace_back(tensor); + + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_024 + * @tc.desc: Verify the conv2dtranspose without set group of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_024, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_GROUP); + m_allTensors.emplace_back(tensor); + + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_padmode_025 + * @tc.desc: Verify the conv2dtranspose without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_build_padmode_025, TestSize.Level1) +{ + m_paramsIndex = m_params; + SetConv2dTransposeInput(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD_MODE); + SetOutPaddings(OH_NN_INT64, outPaddingsDim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + m_allTensors.emplace_back(tensor); + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_getprimitive_padmode_001 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_getprimitive_padmode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + std::vector returnStrides = mindspore::lite::MindIR_Conv2dTransposeFusion_GetStride(primitive.get()); + std::vector strideValueTest{1, 1}; + std::vector expectDliation = 
mindspore::lite::MindIR_Conv2dTransposeFusion_GetDilation(primitive.get());
+    std::vector<int64_t> dilationValueTest{1, 1};
+    EXPECT_EQ(strideValueTest, returnStrides);
+    EXPECT_EQ(dilationValueTest, expectDliation);
+
+    int expectpadMode = mindspore::lite::MindIR_Conv2dTransposeFusion_GetPadMode(primitive.get());
+    EXPECT_EQ(1, expectpadMode);
+
+    int expectGroup = mindspore::lite::MindIR_Conv2dTransposeFusion_GetGroup(primitive.get());
+    EXPECT_EQ(0, expectGroup);
+
+    std::vector<int64_t> expectoutPadding =
+        mindspore::lite::MindIR_Conv2dTransposeFusion_GetOutputPaddings(primitive.get());
+    std::vector<int64_t> outPaddingTest{0, 0};
+    EXPECT_EQ(outPaddingTest, expectoutPadding);
+
+    int expectActivation = mindspore::lite::MindIR_Conv2dTransposeFusion_GetActivationType(primitive.get());
+    EXPECT_EQ(0, expectActivation);
+}
+
+/**
+ * @tc.name: conv2dtranpose_getprimitive_padmode_002
+ * @tc.desc: Verify the nullptr return of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(Conv2DTransposePadmodeBuilderTest, conv2dtranpose_getprimitive_padmode_002, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+
+    SetConv2dTransposeInput();
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    SetParam();
+
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive);
+    EXPECT_EQ(expectPrimitive, primitive);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/test/unittest/ops/conv2d_transpose_pad_test.cpp b/test/unittest/ops/conv2d_transpose_pad_test.cpp
new file mode 100644
index 0000000..a0a8179
--- /dev/null
+++ b/test/unittest/ops/conv2d_transpose_pad_test.cpp
@@ -0,0 +1,488 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "frameworks/native/ops/conv2d_transpose_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class Conv2DTransposeBuilderTest : public OpsTest {
+public:
+    void SetUp() override;
+    void TearDown() override;
+
+    void SetConv2dTransposeInput();
+    void SetPad(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+    void SetOutPaddings(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+    void SetPadParam();
+
+public:
+    Conv2DTransposeBuilder m_builder;
+    std::vector<uint32_t> m_inputs{0, 1, 2};
+    std::vector<uint32_t> m_outputs{3};
+    std::vector<uint32_t> m_params{4, 5, 6, 7, 8, 9};
+    std::vector<int32_t> m_output_dim{1, 3, 3, 1};
+    std::vector<int32_t> m_stride_dim{2};
+    std::vector<int32_t> m_dilation_dim{2};
+    std::vector<int32_t> m_outpaddings_dim{2};
+    std::vector<int32_t> m_pad_dim{4};
+    std::vector<int32_t> m_param_dim{};
+};
+
+void Conv2DTransposeBuilderTest::SetUp() {}
+
+void Conv2DTransposeBuilderTest::TearDown() {}
+
+void Conv2DTransposeBuilderTest::SetPad(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    int32_t padNum = 4;
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int64_t* padValue = new (std::nothrow) int64_t[4]{1, 1, 1, 1};
+    EXPECT_NE(nullptr, padValue);
+
+    tensor->SetBuffer(padValue, padNum * sizeof(int64_t));
+    m_allTensors.emplace_back(tensor);
+}
+
+void Conv2DTransposeBuilderTest::SetOutPaddings(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    int32_t outPaddingsNum = 2;
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int64_t* outPaddingsValue = new (std::nothrow) int64_t[2]{0, 0};
+    EXPECT_NE(nullptr, outPaddingsValue);
+
+    tensor->SetBuffer(outPaddingsValue, outPaddingsNum * sizeof(int64_t));
+    m_allTensors.emplace_back(tensor);
+}
+
+void Conv2DTransposeBuilderTest::SetPadParam()
+{
+    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES);
+    SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION);
+    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD);
+    SetOutPaddings(OH_NN_INT64, m_outpaddings_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS);
+    SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP);
+    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE);
+}
+
+void Conv2DTransposeBuilderTest::SetConv2dTransposeInput()
+{
+    int32_t weightNum = 4;
+    std::vector<int32_t> m_input_dim{1, 4, 4, 1};
+    std::vector<int32_t> weightDim = {1, 2, 2, 1};
+    std::vector<int32_t> biasDim = {1};
+    std::shared_ptr<NNTensor> tensor;
+    tensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR);
+    m_allTensors.emplace_back(tensor);
+
+    tensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR);
+    float* weightValue = new (std::nothrow) float[4]{1, 1, 1, 1};
+    EXPECT_NE(nullptr, weightValue);
+
+    tensor->SetBuffer(weightValue, weightNum * sizeof(float));
+    m_allTensors.emplace_back(tensor);
+    tensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR);
+    float* biasValue = new (std::nothrow) float[1]{0};
+    EXPECT_NE(nullptr, biasValue);
+
+    tensor->SetBuffer(biasValue, sizeof(float));
+    m_allTensors.emplace_back(tensor);
+}
+
+/**
+ *
@tc.name: conv2dtranpose_build_pad_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_001, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_002, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2, 3, 4, 5, 6, 7}; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_004, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {}; + m_params = {3, 4, 5, 6, 7, 8}; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_005, TestSize.Level1) +{ + m_inputs = {0, 1, 10}; + m_outputs = {3}; + m_params = {4, 5, 6, 7, 8, 9}; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_006, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {10}; + m_params = {4, 5, 6, 7, 8, 9};; + + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_007 + * 
@tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_007, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_STRIDES); + int32_t* strideValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + SetOutPaddings(OH_NN_INT64, m_outpaddings_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_008 + * @tc.desc: Verify the invalid dilation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_008, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_dilation_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_DILATION); + int32_t* dilationValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, dilationValue); + + tensor->SetBuffer(dilationValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + SetOutPaddings(OH_NN_INT64, m_outpaddings_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_009 + * @tc.desc: Verify the invalid pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_009, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + int32_t* padValue = new (std::nothrow) int32_t[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, padValue); + + tensor->SetBuffer(padValue, 4 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetOutPaddings(OH_NN_INT64, m_outpaddings_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_010 + * @tc.desc: Verify the invalid 
outpaddings of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_010, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_outpaddings_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + + int32_t* outPaddingsTypeInvalid = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, outPaddingsTypeInvalid); + tensor->SetBuffer(outPaddingsTypeInvalid, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_011 + * @tc.desc: Verify the invalid group of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_011, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + SetOutPaddings(OH_NN_INT64, m_outpaddings_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + int32_t* groupValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, groupValue); + + tensor->SetBuffer(groupValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_012 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_012, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + SetOutPaddings(OH_NN_INT64, m_outpaddings_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_013 + * @tc.desc: Verify the group scalar length of the build 
function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_013, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + SetOutPaddings(OH_NN_INT64, m_outpaddings_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_outpaddings_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_GROUP); + int64_t* groupValue = new (std::nothrow) int64_t[2]{0, 0}; + EXPECT_NE(nullptr, groupValue); + + tensor->SetBuffer(groupValue, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_build_pad_014 + * @tc.desc: Verify the activation scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_build_pad_014, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_PAD); + SetOutPaddings(OH_NN_INT64, m_outpaddings_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS); + SetGroup(OH_NN_INT64, m_param_dim, nullptr, OH_NN_CONV2D_TRANSPOSE_GROUP); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_outpaddings_dim, nullptr, + OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationValue); + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: conv2dtranpose_getprimitive_padmode_001 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_getprimitive_padmode_001, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + std::vector returnStrides = mindspore::lite::MindIR_Conv2dTransposeFusion_GetStride(primitive.get()); + std::vector strideValueTest{1, 1}; + std::vector returnDliation = mindspore::lite::MindIR_Conv2dTransposeFusion_GetDilation(primitive.get()); + std::vector dilationValueTest{1, 1}; + std::vector returnPad = mindspore::lite::MindIR_Conv2dTransposeFusion_GetPadList(primitive.get()); + std::vector padValueTest{1, 1, 1, 1}; + int returnGroup = mindspore::lite::MindIR_Conv2dTransposeFusion_GetGroup(primitive.get()); + EXPECT_EQ(0, returnGroup); + + std::vector outPaddingReturn = 
+ mindspore::lite::MindIR_Conv2dTransposeFusion_GetOutputPaddings(primitive.get()); + std::vector outPaddingTest{0, 0}; + EXPECT_EQ(outPaddingTest, outPaddingReturn); + + int returnActivation = mindspore::lite::MindIR_Conv2dTransposeFusion_GetActivationType(primitive.get()); + EXPECT_EQ(0, returnActivation); +} + +/** + * @tc.name: conv2dtranpose_getprimitive_padmode_002 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(Conv2DTransposeBuilderTest, conv2dtranpose_getprimitive_padmode_002, TestSize.Level1) +{ + SetConv2dTransposeInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/depthwise_conv2d_native_pad_test.cpp b/test/unittest/ops/depthwise_conv2d_native_pad_test.cpp new file mode 100644 index 0000000..0e4a6ae --- /dev/null +++ b/test/unittest/ops/depthwise_conv2d_native_pad_test.cpp @@ -0,0 +1,629 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "frameworks/native/ops/depthwise_conv2d_native_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class DepthwiseConv2DNativeBuilderTest : public OpsTest {
+public:
+    void SetUp() override;
+    void TearDown() override;
+
+    void SetDepthwiseConv2dInput();
+    void SetPad(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+    void SetPadParam();
+
+public:
+    DepthwiseConv2DNativeBuilder m_builder;
+    std::vector<uint32_t> m_inputs{0, 1, 2};
+    std::vector<uint32_t> m_outputs{3};
+    std::vector<uint32_t> m_params{4, 5, 6, 7};
+    std::vector<int32_t> m_output_dim{1, 4, 4, 2};
+    std::vector<int32_t> m_stride_dim{2};
+    std::vector<int32_t> m_dilation_dim{2};
+    std::vector<int32_t> m_pad_dim{4};
+    std::vector<int32_t> m_param_dim{};
+};
+
+void DepthwiseConv2DNativeBuilderTest::SetUp() {}
+
+void DepthwiseConv2DNativeBuilderTest::TearDown() {}
+
+void DepthwiseConv2DNativeBuilderTest::SetPad(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    int32_t padNum = 4;
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int64_t* padValue = new (std::nothrow) int64_t[4]{1, 1, 1, 1};
+    EXPECT_NE(nullptr, padValue);
+
+    tensor->SetBuffer(padValue, padNum * sizeof(int64_t));
+    m_allTensors.emplace_back(tensor);
+}
+
+void DepthwiseConv2DNativeBuilderTest::SetPadParam()
+{
+    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES);
+    SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION);
+    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD);
+    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE);
+}
+
+void DepthwiseConv2DNativeBuilderTest::SetDepthwiseConv2dInput()
+{
+    int32_t weightNum = 8;
+    int32_t biasNum = 2;
+    std::vector<int32_t> m_input_dim{1, 3, 3, 2};
+    std::vector<int32_t> weightDim = {2, 2, 2, 1};
+    std::vector<int32_t> biasDim = {2};
+
+    std::shared_ptr<NNTensor> inputsTensor;
+    inputsTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR);
+    m_allTensors.emplace_back(inputsTensor);
+    inputsTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR);
+    float* weightValue = new (std::nothrow) float[8]{1, 0, 0, 1, 0, 1, 1, 0};
+    EXPECT_NE(nullptr, weightValue);
+
+    inputsTensor->SetBuffer(weightValue, weightNum * sizeof(float));
+    m_allTensors.emplace_back(inputsTensor);
+    inputsTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR);
+    float* biasValue = new (std::nothrow) float[2]{0, 0};
+    EXPECT_NE(nullptr, biasValue);
+    inputsTensor->SetBuffer(biasValue, biasNum * sizeof(float));
+    m_allTensors.emplace_back(inputsTensor);
+}
+
+/**
+ * @tc.name: depthwiseconv2d_build_padmode_001
+ * @tc.desc: Verify the success of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_001, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+
+    SetDepthwiseConv2dInput();
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    SetPadParam();
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: depthwiseconv2d_build_padmode_002
+ * @tc.desc: Verify the forbidden of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_004, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {}; + m_params = {3, 4, 5, 6}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_005, TestSize.Level1) +{ + m_inputs = {0, 1, 9}; + m_outputs = {3}; + m_params = {4, 5, 6, 7}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_006, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {9}; + m_params = {4, 5, 6, 7}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_007 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_007, TestSize.Level1) +{ + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + int32_t* strideValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, 2 * sizeof(int32_t)); + 
m_allTensors.emplace_back(tensor); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_008 + * @tc.desc: Verify the invalid dilation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_008, TestSize.Level1) +{ + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_dilation_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + int32_t* dilationValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, dilationValue); + + tensor->SetBuffer(dilationValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_009 + * @tc.desc: Verify the invalid pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_pad_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + int32_t* padValue = new (std::nothrow) int32_t[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, padValue); + + tensor->SetBuffer(padValue, 4 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} +/** + * @tc.name: depthwiseconv2d_build_padmode_010 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, 
activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_011 + * @tc.desc: Verify the scalar activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_011, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + + std::vector activationDim = {2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, activationDim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_012 + * @tc.desc: Verify the invalid param to depthwiseconv2d of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_012, TestSize.Level1) +{ + std::vector activationDim = {2}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, activationDim, nullptr, + OH_NN_DIV_ACTIVATIONTYPE); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_013 + * @tc.desc: Verify the invalid activation value of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_013, TestSize.Level1) +{ + std::vector activationDim = {}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, activationDim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t(10); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_014 + * @tc.desc: Verify the invalid pad dim value of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_014, TestSize.Level1) +{ + std::vector m_pad_dim = {3}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + + int32_t padNum = 3; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + int64_t* padValue = new (std::nothrow) int64_t[3]{1, 1, 1}; + EXPECT_NE(nullptr, padValue); + + tensor->SetBuffer(padValue, padNum * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_015 + * @tc.desc: Verify the invalid weigth size of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_015, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + int32_t weightNum = 3; + int32_t biasNum = 2; + std::vector m_input_dim{1, 3, 3, 2}; + std::vector weightDim = {1, 3, 3}; + std::vector biasDim = {2}; + + std::shared_ptr inputsTensor; + inputsTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputsTensor); + + inputsTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR); + float* weightValue = new (std::nothrow) float[3]{1, 0, 0}; + EXPECT_NE(nullptr, weightValue); + + inputsTensor->SetBuffer(weightValue, weightNum * sizeof(weightValue)); + m_allTensors.emplace_back(inputsTensor); + + inputsTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* biasValue = new (std::nothrow) float[2]{0, 0}; + EXPECT_NE(nullptr, biasValue); + + inputsTensor->SetBuffer(biasValue, biasNum * sizeof(float)); + m_allTensors.emplace_back(inputsTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_016 + * @tc.desc: Verify the invalid inputdim of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_016, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + int32_t weightNum = 3; + int32_t biasNum = 2; + std::vector m_input_dim{1, 3, 3}; + std::vector weightDim = {2, 2, 2, 1}; + std::vector biasDim = {2}; + + std::shared_ptr inTensor; + inTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inTensor); + + inTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR); + float* weightValue = new (std::nothrow) float[8]{1, 0, 0, 1, 0, 1, 1, 0}; + EXPECT_NE(nullptr, weightValue); + + inTensor->SetBuffer(weightValue, weightNum * sizeof(weightValue)); + m_allTensors.emplace_back(inTensor); + + inTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* biasValue = new (std::nothrow) 
float[2]{0, 0}; + EXPECT_NE(nullptr, biasValue); + + inTensor->SetBuffer(biasValue, biasNum * sizeof(float)); + m_allTensors.emplace_back(inTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_017 + * @tc.desc: Verify the depthwiseconv2d without set stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_017, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_stride_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + m_allTensors.emplace_back(tensor); + + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_018 + * @tc.desc: Verify the depthwiseconv2d without set dilation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_018, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_dilation_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + m_allTensors.emplace_back(tensor); + + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_019 + * @tc.desc: Verify the depthwiseconv2d without set pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_019, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + m_allTensors.emplace_back(tensor); + + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_020 + * @tc.desc: Verify the depthwiseconv2d without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_build_padmode_020, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + 
SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_getprimitive_padmode_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_getprimitive_padmode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + std::vector padValueTest{1, 1, 1, 1}; + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + std::vector expectStrides = mindspore::lite::MindIR_Conv2DFusion_GetStride(primitive.get()); + std::vector strideValueTest{1, 1}; + std::vector expectDliation = mindspore::lite::MindIR_Conv2DFusion_GetDilation(primitive.get()); + std::vector dilationValueTest{1, 1}; + std::vector expectPad = mindspore::lite::MindIR_Conv2DFusion_GetPadList(primitive.get()); + EXPECT_EQ(padValueTest, expectPad); + + int returnActivation = mindspore::lite::MindIR_Conv2DFusion_GetActivationType(primitive.get()); + EXPECT_EQ(0, returnActivation); +} + +/** + * @tc.name: depthwiseconv2d_getprimitive_padmode_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativeBuilderTest, depthwiseconv2d_getprimitive_padmode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetPadParam(); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/depthwise_conv2d_native_padmode_test.cpp b/test/unittest/ops/depthwise_conv2d_native_padmode_test.cpp new file mode 100644 index 0000000..8181be2 --- /dev/null +++ b/test/unittest/ops/depthwise_conv2d_native_padmode_test.cpp @@ -0,0 +1,409 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "frameworks/native/ops/depthwise_conv2d_native_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class DepthwiseConv2DNativePadModeBuilderTest : public OpsTest {
+public:
+    void SetUp();
+    void TearDown();
+
+    void SetDepthwiseConv2dInput();
+    void SetPadMode(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+    void SetParam();
+
+public:
+    DepthwiseConv2DNativeBuilder m_builder;
+    std::vector<uint32_t> m_inputs{0, 1, 2};
+    std::vector<uint32_t> m_outputs{3};
+    std::vector<uint32_t> m_params{4, 5, 6, 7};
+    std::vector<int32_t> m_output_dim{1, 4, 4, 2};
+    std::vector<int32_t> m_stride_dim{2};
+    std::vector<int32_t> m_dilation_dim{2};
+    std::vector<int32_t> m_param_dim{};
+};
+
+void DepthwiseConv2DNativePadModeBuilderTest::SetUp() {}
+
+void DepthwiseConv2DNativePadModeBuilderTest::TearDown() {}
+
+void DepthwiseConv2DNativePadModeBuilderTest::SetPadMode(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int8_t* padModeValue = new (std::nothrow) int8_t(0);
+    EXPECT_NE(nullptr, padModeValue);
+    tensor->SetBuffer(padModeValue, sizeof(int8_t));
+    m_allTensors.emplace_back(tensor);
+}
+
+void DepthwiseConv2DNativePadModeBuilderTest::SetParam()
+{
+    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES);
+    SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION);
+    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE);
+    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE);
+}
+
+void DepthwiseConv2DNativePadModeBuilderTest::SetDepthwiseConv2dInput()
+{
+    int32_t weightNum = 8;
+    int32_t biasNum = 2;
+    std::vector<int32_t> m_input_dim{1, 3, 3, 2};
+    std::vector<int32_t> weightDim = {2, 2, 2, 1};
+    std::vector<int32_t> biasDim = {2};
+    std::shared_ptr<NNTensor> inputTensor;
+    inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR);
+    m_allTensors.emplace_back(inputTensor);
+
+    inputTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR);
+    float* weightValue = new (std::nothrow) float[8]{1, 0, 0, 1, 0, 1, 1, 0};
+    EXPECT_NE(nullptr, weightValue);
+
+    inputTensor->SetBuffer(weightValue, weightNum * sizeof(float));
+    m_allTensors.emplace_back(inputTensor);
+
+    inputTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR);
+    float* biasValue = new (std::nothrow) float[2]{0, 0};
+    EXPECT_NE(nullptr, biasValue);
+
+    inputTensor->SetBuffer(biasValue, biasNum * sizeof(float));
+    m_allTensors.emplace_back(inputTensor);
+}
+
+/**
+ * @tc.name: depthwiseconv2d_build_padmode_001
+ * @tc.desc: Verify the success of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_001, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    m_inputsIndex = m_inputs;
+
+    SetDepthwiseConv2dInput();
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    SetParam();
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: depthwiseconv2d_build_padmode_002
+ * @tc.desc: Verify the forbidden of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest,
depthwiseconv2d_build_padmode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_004, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {}; + m_params = {3, 4, 5, 6}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_005, TestSize.Level1) +{ + m_inputs = {0, 1, 9}; + m_outputs = {3}; + m_params = {4, 5, 6, 7}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_006, TestSize.Level1) +{ + m_inputs = {0, 1, 2}; + m_outputs = {9}; + m_params = {4, 5, 6, 7}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_007 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_007, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + int32_t* strideValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, 2 * 
sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_008 + * @tc.desc: Verify the invalid dilation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_dilation_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + int32_t* dilationValue = new (std::nothrow) int32_t[2]{1, 1}; + EXPECT_NE(nullptr, dilationValue); + + tensor->SetBuffer(dilationValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_009 + * @tc.desc: Verify the invalid pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE); + int32_t* padModeValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, padModeValue); + tensor->SetBuffer(padModeValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_010 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + 
EXPECT_NE(nullptr, activationValue); + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_011 + * @tc.desc: Verify the scalar activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_011, TestSize.Level1) +{ + std::vector activationDim = {2}; + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, activationDim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + int8_t* activationValue = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_build_padmode_012 + * @tc.desc: Verify the invalid pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_012, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES); + SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE); + int8_t* padModeValue = new (std::nothrow) int8_t(10); + EXPECT_NE(nullptr, padModeValue); + + tensor->SetBuffer(padModeValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: depthwiseconv2d_getprimitive_padmode_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_getprimitive_padmode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + std::vector returnStrides = mindspore::lite::MindIR_Conv2DFusion_GetStride(primitive.get()); + std::vector strideValueTest{1, 1}; + std::vector returnDliation = mindspore::lite::MindIR_Conv2DFusion_GetDilation(primitive.get()); + std::vector dilationValueTest{1, 1}; + EXPECT_EQ(dilationValueTest, returnDliation); + + int 
returnpadMode = mindspore::lite::MindIR_Conv2DFusion_GetPadMode(primitive.get()); + EXPECT_EQ(1, returnpadMode); + int returnActivation = mindspore::lite::MindIR_Conv2DFusion_GetActivationType(primitive.get()); + EXPECT_EQ(0, returnActivation); +} + +/** + * @tc.name: depthwiseconv2d_getprimitive_padmode_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_getprimitive_padmode_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_inputsIndex = m_inputs; + + SetDepthwiseConv2dInput(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/div_test.cpp b/test/unittest/ops/div_test.cpp new file mode 100644 index 0000000..31668cb --- /dev/null +++ b/test/unittest/ops/div_test.cpp @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/div_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class DivFusionTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SaveParamsTensor(const std::vector& m_params, OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +public: + DivBuilder m_builder; + std::vector m_inputs{0, 1}; + std::vector m_outputs{2}; + std::vector m_params{3}; + std::vector m_input_dim{3, 3}; + std::vector m_output_dim{3, 3}; + std::vector m_param_dim{}; +}; + +void DivFusionTest::SetUp() {} + +void DivFusionTest::TearDown() {} + +void DivFusionTest::SaveParamsTensor(const std::vector& m_params, OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + m_paramsIndex = m_params; + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); +} + +/** + * @tc.name: div_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, 
m_allTensors)); +} + +/** + * @tc.name: div_build_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_004, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {}; + m_params = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_005, TestSize.Level1) +{ + m_inputs = {0, 6}; + m_outputs = {2}; + m_params = {3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_006, TestSize.Level1) +{ + m_inputs = {0, 1}; + m_outputs = {6}; + m_params = {3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_007 + * @tc.desc: Verify the param invalid of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_007, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + m_paramsIndex = m_params; + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + int32_t* activationValueTest = new (std::nothrow) int32_t[0]; + EXPECT_NE(nullptr, activationValueTest); + + 
tensor->SetBuffer(activationValueTest, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_008 + * @tc.desc: Verify the scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_008, TestSize.Level1) +{ + m_param_dim = {2}; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + m_paramsIndex = m_params; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + int8_t* activationValueTest = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationValueTest); + + tensor->SetBuffer(activationValueTest, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_009 + * @tc.desc: Verify the invalid activation value of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_009, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + m_paramsIndex = m_params; + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + int8_t* activationValueTest = new (std::nothrow) int8_t(10); + EXPECT_NE(nullptr, activationValueTest); + + tensor->SetBuffer(activationValueTest, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_010 + * @tc.desc: Verify the invalid param to div of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_010, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + m_paramsIndex = m_params; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE); + int8_t* activationValueTest = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValueTest); + + tensor->SetBuffer(activationValueTest, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_build_011 + * @tc.desc: Verify the div without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_build_011, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_DIV_ACTIVATIONTYPE); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: div_getprimitive_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_getprimitive_001, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, 
OH_NN_DIV_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + int8_t activationValueTest = 0; + int8_t returnValue = mindspore::lite::MindIR_DivFusion_GetActivationType(primitive.get()); + EXPECT_EQ(returnValue, activationValueTest); +} + +/** + * @tc.name: div_getprimitive_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(DivFusionTest, div_getprimitive_002, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + + LiteGraphTensorPtr primitive = {nullptr, DestroyLiteGraphPrimitive}; + LiteGraphTensorPtr expectPrimitive = m_builder.GetPrimitive(); + EXPECT_EQ(primitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/eltwise_test.cpp b/test/unittest/ops/eltwise_test.cpp new file mode 100644 index 0000000..3a44e47 --- /dev/null +++ b/test/unittest/ops/eltwise_test.cpp @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/eltwise_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class EltwiseBuilderTest : public OpsTest { +public: + void SetUp(); + void TearDown(); + + void SetEltwiseMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); +public: + EltwiseBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3}; + std::vector m_input_dim {3, 3}; + std::vector m_output_dim {3, 3}; + std::vector m_param_dim {}; +}; + +void EltwiseBuilderTest::SetUp() {} + +void EltwiseBuilderTest::TearDown() {} + +void EltwiseBuilderTest::SetEltwiseMode(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* modeValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, modeValue); + tensor->SetBuffer(modeValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); +} + +/** + * @tc.name: eltwise_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(EltwiseBuilderTest, eltwise_build_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetEltwiseMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(EltwiseBuilderTest, eltwise_build_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetEltwiseMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_003 + * @tc.desc: Verify the missing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(EltwiseBuilderTest, eltwise_build_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_params = {2}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetEltwiseMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(EltwiseBuilderTest, eltwise_build_004, TestSize.Level1) +{ + m_outputs = {}; + m_params = {2}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetEltwiseMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, 
m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(EltwiseBuilderTest, eltwise_build_005, TestSize.Level1) +{ + m_inputs = {0, 6}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetEltwiseMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(EltwiseBuilderTest, eltwise_build_006, TestSize.Level1) +{ + m_outputs = {6}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetEltwiseMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_007 + * @tc.desc: Verify the invalid eltwiseMode of the build function + * @tc.type: FUNC + */ + +HWTEST_F(EltwiseBuilderTest, eltwise_build_007, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + int32_t* modeValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, modeValue); + + tensor->SetBuffer(modeValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_008 + * @tc.desc: Verify the scalar length of the build function + * @tc.type: FUNC + */ + +HWTEST_F(EltwiseBuilderTest, eltwise_build_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + m_param_dim = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + int8_t* modeValue = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, modeValue); + + tensor->SetBuffer(modeValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_009 + * @tc.desc: Verify the invalid mode value of the build function + * @tc.type: FUNC + */ + +HWTEST_F(EltwiseBuilderTest, eltwise_build_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + int8_t* modeValue = new (std::nothrow) int8_t(10); + EXPECT_NE(nullptr, modeValue); + + tensor->SetBuffer(modeValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_010 + * @tc.desc: Verify the 
invalid param to eltwise of the build function + * @tc.type: FUNC + */ + +HWTEST_F(EltwiseBuilderTest, eltwise_build_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE); + int8_t* modeValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, modeValue); + + tensor->SetBuffer(modeValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_build_011 + * @tc.desc: Verify the eltwise without set mode of the build function + * @tc.type: FUNC + */ +HWTEST_F(EltwiseBuilderTest, eltwise_build_011, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: eltwise_getprimitive_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(EltwiseBuilderTest, eltwise_getprimitive_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetEltwiseMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + bool eltwiseModeReturn = mindspore::lite::MindIR_Eltwise_GetMode(primitive.get()); + EXPECT_EQ(eltwiseModeReturn, 0); +} + +/** + * @tc.name: eltwise_getprimitive_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(EltwiseBuilderTest, eltwise_getprimitive_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetEltwiseMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ELTWISE_MODE); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/expandims_test.cpp b/test/unittest/ops/expandims_test.cpp new file mode 100644 index 0000000..ad3c040 --- /dev/null +++ b/test/unittest/ops/expandims_test.cpp @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/expandims_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ExpandDimsBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +public: + ExpandDimsBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {}; + std::vector m_input_dim {3, 3}; + std::vector m_output_dim {3, 3}; + std::vector m_param_dim {}; +}; + +void ExpandDimsBuilderTest::SetUp() {} + +void ExpandDimsBuilderTest::TearDown() {} + +/** + * @tc.name: expandims_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(ExpandDimsBuilderTest, expandims_build_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: expandims_build_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(ExpandDimsBuilderTest, expandims_build_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: expandims_build_003 + * @tc.desc: Verify the misssing input of the build function + * @tc.type: FUNC + */ +HWTEST_F(ExpandDimsBuilderTest, expandims_build_003, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {1}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: expandims_build_004 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(ExpandDimsBuilderTest, expandims_build_004, TestSize.Level1) +{ + m_outputs = {}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: expandims_build_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(ExpandDimsBuilderTest, expandims_build_005, TestSize.Level1) +{ + m_inputs = {0, 6}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + 
SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: expandims_build_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(ExpandDimsBuilderTest, expandims_build_006, TestSize.Level1) +{ + m_outputs = {6}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: expandims_build_007 + * @tc.desc: Verify the paramIndex not empth of the build function + * @tc.type: FUNC + */ + +HWTEST_F(ExpandDimsBuilderTest, expandims_build_007, TestSize.Level1) +{ + m_params = {1}; + m_param_dim = {1}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: expandims_getprimitive_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(ExpandDimsBuilderTest, expandims_getprimitive_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: expandims_getprimitive_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(ExpandDimsBuilderTest, expandims_getprimitive_002, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/fill_builder_test.cpp b/test/unittest/ops/fill_builder_test.cpp new file mode 100644 index 0000000..59bde45 --- /dev/null +++ b/test/unittest/ops/fill_builder_test.cpp @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/fill_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class FillBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + FillBuilder m_fill; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {}; + std::vector m_inputDim {}; + std::vector m_outputDim {2, 3}; +}; + +void FillBuilderTest::SetUp() {} + +void FillBuilderTest::TearDown() {} + +/** + * @tc.name: fill_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_fill.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: fill_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_fill.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_fill.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: fill_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_fill.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: fill_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_build_004, TestSize.Level0) +{ + m_outputs = {2, 3, 4}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_fill.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: fill_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_fill.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: fill_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_fill.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: fill_build_007 + * @tc.desc: Verify that the build function returns a failed message with a virtual parameter. + * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_build_007, TestSize.Level0) +{ + m_params = {3}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr paramTensor; + paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_fill.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: fill_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_fill.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_fill.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: fill_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(FillBuilderTest, fill_getprimitive_002, TestSize.Level0) +{ + LiteGraphPrimitvePtr primitive = m_fill.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/fullconnection_test.cpp b/test/unittest/ops/fullconnection_test.cpp new file mode 100644 index 0000000..0754a71 --- /dev/null +++ b/test/unittest/ops/fullconnection_test.cpp @@ -0,0 +1,289 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/fullconnection_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class FullConnectionBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + + void SetInputToAlltensor(); + void SetActivation(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +public: + FullConnectionBuilder m_builder; + std::vector m_inputs {0, 1, 2}; + std::vector m_outputs {3}; + std::vector m_params {4}; + std::vector m_output_dim {2, 2}; + std::vector m_param_dim {}; +}; + +void FullConnectionBuilderTest::SetUp() {} + +void FullConnectionBuilderTest::TearDown() {} + +void FullConnectionBuilderTest::SetInputToAlltensor() +{ + std::vector m_input_dim{2, 2}; + std::vector biasDim = {2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(tensor); + + int32_t numWeight = 4; + int32_t numBias = 2; + tensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + float* valueWeight = new (std::nothrow) float[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, valueWeight); + + tensor->SetBuffer(valueWeight, numWeight * sizeof(float)); + m_allTensors.emplace_back(tensor); + + tensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* valueBias = new (std::nothrow) float[2]{0, 0}; + EXPECT_NE(nullptr, valueBias); + tensor->SetBuffer(valueBias, numBias * sizeof(float)); + m_allTensors.emplace_back(tensor); +} + +void FullConnectionBuilderTest::SetActivation(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); +} + +/** + * @tc.name: fullconnection_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_001, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_002 + * @tc.desc: Verify the forbidden of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_002, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_003 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionBuilderTest, 
fullconnection_build_003, TestSize.Level1) +{ + m_outputs = {}; + m_params = {3}; + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_004 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_004, TestSize.Level1) +{ + m_inputs = {0, 1, 6}; + m_outputs = {3}; + m_params = {4}; + + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_005 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ + +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_005, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + int32_t *activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_006 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ + +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_006, TestSize.Level1) +{ + m_param_dim = {2}; + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + int8_t *activationValue = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_007 + * @tc.desc: Verify the invalid avtivation value of the build function + * @tc.type: FUNC + */ + +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_007, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + int8_t *activationValue = new (std::nothrow) int8_t(10); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_008 + 
* @tc.desc: Verify the invalid param to fullconnection of the build function + * @tc.type: FUNC + */ + +HWTEST_F(FullConnectionBuilderTest, fullconnection_build_008, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_DIV_ACTIVATIONTYPE); + int8_t *activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + tensor->SetBuffer(activationValue, sizeof(int8_t)); + + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_getprimitive_001 + * @tc.desc: Verify the success of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionBuilderTest, fullconnection_getprimitive_001, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + int8_t activationReturn = mindspore::lite::MindIR_FullConnection_GetActivationType(primitive.get()); + EXPECT_EQ(activationReturn, 0); +} + +/** + * @tc.name: fullconnection_getprimitive_002 + * @tc.desc: Verify the nullptr return of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionBuilderTest, fullconnection_getprimitive_002, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/fullconnection_with_axis_test.cpp b/test/unittest/ops/fullconnection_with_axis_test.cpp new file mode 100644 index 0000000..6f379e2 --- /dev/null +++ b/test/unittest/ops/fullconnection_with_axis_test.cpp @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/fullconnection_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class FullConnectionAxisBuilderTest : public OpsTest { +public: + void SetUp(); + void TearDown(); + + void SetInputToAlltensor(); + void SetActivation(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SeAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +public: + FullConnectionBuilder m_builder; + std::vector m_inputs {0, 1, 2}; + std::vector m_outputs {3}; + std::vector m_params {4, 5}; + std::vector m_output_dim {2, 2}; + std::vector m_param_dim {}; +}; + +void FullConnectionAxisBuilderTest::SetUp() {} + +void FullConnectionAxisBuilderTest::TearDown() {} + +void FullConnectionAxisBuilderTest::SetInputToAlltensor() +{ + std::vector m_input_dim{2, 2}; + std::vector biasDim{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(tensor); + int32_t weightNum = 4; + int32_t biasNum = 2; + tensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR); + float* valueWeight = new (std::nothrow) float[4]{1, 1, 1, 1}; + EXPECT_NE(nullptr, valueWeight); + tensor->SetBuffer(valueWeight, weightNum * sizeof(float)); + m_allTensors.emplace_back(tensor); + + tensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR); + float* valueBias = new (std::nothrow) float[2]{0, 0}; + EXPECT_NE(nullptr, valueBias); + tensor->SetBuffer(valueBias, biasNum * sizeof(float)); + m_allTensors.emplace_back(tensor); +} + +void FullConnectionAxisBuilderTest::SetActivation(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(tensor); +} + +void FullConnectionAxisBuilderTest::SeAxis(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr tensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t(0); + EXPECT_NE(nullptr, axisValue); + + tensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(tensor); +} + +/** + * @tc.name: fullconnection_build_axis_001 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_001, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_002 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_002, TestSize.Level1) +{ + m_inputsIndex = 
m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_003 + * @tc.desc: Verify the missing output of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_003, TestSize.Level1) +{ + m_outputs = {}; + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_004 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_004, TestSize.Level1) +{ + m_inputs = {0, 1, 6}; + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_005 + * @tc.desc: Verify the invalid axis of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_005, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + int32_t *axisValueTest = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, axisValueTest); + + tensor->SetBuffer(axisValueTest, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_006 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_006, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + int32_t *activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + 
EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_007 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_007, TestSize.Level1) +{ + std::vector paramDimTest = {2}; + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + + SetInputToAlltensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, paramDimTest, nullptr, + OH_NN_FULL_CONNECTION_AXIS); + int64_t *axisValueTest = new (std::nothrow) int64_t[2]{0, 0}; + EXPECT_NE(nullptr, axisValueTest); + + tensor->SetBuffer(axisValueTest, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_008 + * @tc.desc: Verify the behavior of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_008, TestSize.Level1) +{ + std::vector paramDimTest = {2}; + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, paramDimTest, nullptr, + OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + int8_t *activationValue = new (std::nothrow) int8_t[2]{0, 0}; + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_009 + * @tc.desc: Verify the fullconnection without set axis of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_009, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_build_axis_010 + * @tc.desc: Verify the fullconnection without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_build_axis_010, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: fullconnection_getprimitive_axis_001 + * @tc.desc: Verify the behavior of 
the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_getprimitive_axis_001, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(expectPrimitive, primitive); + + int returnValue = mindspore::lite::MindIR_FullConnection_GetAxis(primitive.get()); + EXPECT_EQ(returnValue, 0); + bool activationReturn = mindspore::lite::MindIR_FullConnection_GetActivationType(primitive.get()); + EXPECT_EQ(activationReturn, 0); +} + +/** + * @tc.name: fullconnection_getprimitive_axis_002 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(FullConnectionAxisBuilderTest, fullconnection_getprimitive_axis_002, TestSize.Level1) +{ + m_inputsIndex = m_inputs; + m_paramsIndex = m_params; + SetInputToAlltensor(); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SeAxis(OH_NN_INT64, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_AXIS); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_FULL_CONNECTION_ACTIVATIONTYPE); + + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(expectPrimitive, primitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/gather_builder_test.cpp b/test/unittest/ops/gather_builder_test.cpp new file mode 100644 index 0000000..74a2a03 --- /dev/null +++ b/test/unittest/ops/gather_builder_test.cpp @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/gather_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class GatherBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + GatherBuilder m_gather; + std::vector m_inputs {0, 1, 2}; + std::vector m_outputs {3}; + std::vector m_params {}; + std::vector m_inputDim {4, 3}; + std::vector m_outputDim {4, 2}; +}; + +void GatherBuilderTest::SetUp() {} + +void GatherBuilderTest::TearDown() {} + +/** + * @tc.name: gather_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
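+ *
+ * In outline, the positive cases in this file exercise the following contract (a minimal
+ * sketch, assuming the OpsTest helpers populate m_allTensors as in this fixture):
+ *
+ *     GatherBuilder builder;
+ *     OH_NN_ReturnCode ret = builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+ *     // OH_NN_SUCCESS on the first well-formed call; a second call returns
+ *     // OH_NN_OPERATION_FORBIDDEN, and GetPrimitive() is non-null only after a successful Build.
+ *     LiteGraphPrimitvePtr prim = builder.GetPrimitive();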
+ * @tc.type: FUNC
+ */
+HWTEST_F(GatherBuilderTest, gather_build_001, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+
+    OH_NN_ReturnCode ret = m_gather.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/**
+ * @tc.name: gather_build_002
+ * @tc.desc: Verify that the build function returns a failed message with true m_isBuild.
+ * @tc.type: FUNC
+ */
+HWTEST_F(GatherBuilderTest, gather_build_002, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_gather.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors));
+    OH_NN_ReturnCode ret = m_gather.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: gather_build_003
+ * @tc.desc: Verify that the build function returns a failed message with invalid input.
+ * @tc.type: FUNC
+ */
+HWTEST_F(GatherBuilderTest, gather_build_003, TestSize.Level0)
+{
+    m_inputs = {0, 1, 2, 3};
+    m_outputs = {4};
+
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+
+    OH_NN_ReturnCode ret = m_gather.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: gather_build_004
+ * @tc.desc: Verify that the build function returns a failed message with invalid output.
+ * @tc.type: FUNC
+ */
+HWTEST_F(GatherBuilderTest, gather_build_004, TestSize.Level0)
+{
+    std::vector<uint32_t> m_outputs = {3, 4, 5};
+
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+
+    OH_NN_ReturnCode ret = m_gather.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: gather_build_005
+ * @tc.desc: Verify that the build function returns a failed message with empty allTensor.
+ * @tc.type: FUNC
+ */
+HWTEST_F(GatherBuilderTest, gather_build_005, TestSize.Level0)
+{
+    OH_NN_ReturnCode ret = m_gather.Build(m_params, m_inputs, m_outputs, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: gather_build_006
+ * @tc.desc: Verify that the build function returns a failed message without output tensor.
+ * @tc.type: FUNC
+ */
+HWTEST_F(GatherBuilderTest, gather_build_006, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+
+    OH_NN_ReturnCode ret = m_gather.Build(m_params, m_inputsIndex, m_outputs, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: gather_build_007
+ * @tc.desc: Verify that the build function returns a failed message with a virtual parameter.
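+ *
+ * Gather takes no operator parameters (m_params is empty in this fixture), so the case
+ * below registers one extra tensor and passes its index as a parameter; Build is then
+ * expected to return OH_NN_INVALID_PARAMETER. In outline (a sketch of the setup below):
+ *
+ *     m_params = {4};                                           // index of the bogus parameter
+ *     auto paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR);
+ *     m_allTensors.emplace_back(paramTensor);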
+ * @tc.type: FUNC + */ +HWTEST_F(GatherBuilderTest, gather_build_007, TestSize.Level0) +{ + m_params = {4}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr paramTensor; + paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_gather.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gather_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(GatherBuilderTest, gather_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_gather.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_gather.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: gather_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(GatherBuilderTest, gather_getprimitive_002, TestSize.Level0) +{ + GatherBuilder gather; + LiteGraphPrimitvePtr primitive = m_gather.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/gelu_builder_test.cpp b/test/unittest/ops/gelu_builder_test.cpp new file mode 100644 index 0000000..e6b56b3 --- /dev/null +++ b/test/unittest/ops/gelu_builder_test.cpp @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/gelu_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class GeluBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + GeluBuilder m_gelu; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_params {}; + std::vector m_inputDim {1, 5, 1, 1}; + std::vector m_outputDim {1, 5, 1, 1}; +}; + +void GeluBuilderTest::SetUp() {} + +void GeluBuilderTest::TearDown() {} + +/** + * @tc.name: gelu_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: gelu_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: gelu_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gelu_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_build_004, TestSize.Level0) +{ + std::vector m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gelu_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gelu_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gelu_build_007 + * @tc.desc: Verify that the build function returns a failed message with a virtual parameter. 
+ * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_build_007, TestSize.Level0) +{ + m_params = {2}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr paramTensor; + paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: gelu_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_gelu.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_gelu.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_GELU; + auto returnValue = mindspore::lite::MindIR_Activation_GetActivationType(primitive.get()); + EXPECT_EQ(returnValue, activationType); +} + +/** + * @tc.name: gelu_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(GeluBuilderTest, gelu_getprimitive_002, TestSize.Level0) +{ + GeluBuilder gelu; + LiteGraphPrimitvePtr primitive = m_gelu.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/hswish_builder_test.cpp b/test/unittest/ops/hswish_builder_test.cpp new file mode 100644 index 0000000..a80a48b --- /dev/null +++ b/test/unittest/ops/hswish_builder_test.cpp @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/hswish_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class HswishBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + HswishBuilder m_hswish; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_params {}; + std::vector m_inputDim {1, 5, 1, 1}; + std::vector m_outputDim {1, 5, 1, 1}; +}; + +void HswishBuilderTest::SetUp() {} + +void HswishBuilderTest::TearDown() {} + +/** + * @tc.name: hswish_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_hswish.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: hswish_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_hswish.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_hswish.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: hswish_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_hswish.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: hswish_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_build_004, TestSize.Level0) +{ + std::vector m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_hswish.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: hswish_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_hswish.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: hswish_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_hswish.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: hswish_build_007 + * @tc.desc: Verify that the build function returns a failed message with a virtual parameter. 
+ * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_build_007, TestSize.Level0) +{ + std::vector m_params = {2}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr paramTensor; + paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_hswish.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: hswish_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_hswish.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_hswish.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_HSWISH; + auto returnValue = mindspore::lite::MindIR_Activation_GetActivationType(primitive.get()); + EXPECT_EQ(returnValue, activationType); +} + +/** + * @tc.name: hswish_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(HswishBuilderTest, hswish_getprimitive_002, TestSize.Level0) +{ + HswishBuilder hswish; + LiteGraphPrimitvePtr primitive = m_hswish.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/layernorm_builder_test.cpp b/test/unittest/ops/layernorm_builder_test.cpp new file mode 100644 index 0000000..254909e --- /dev/null +++ b/test/unittest/ops/layernorm_builder_test.cpp @@ -0,0 +1,465 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/layernorm_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class LayerNormBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: +void SaveNormAixsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); +void SaveEpsilonTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); +void SaveParamAxisTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); +void SetInputTensor(std::shared_ptr inputTensor); + +public: + LayerNormBuilder m_layerNorm; + std::vector m_inputs {0, 1, 2}; + std::vector m_outputs {3}; + std::vector m_params {4, 5, 6}; + std::vector m_inputDimNorm {2, 3}; + std::vector m_inputDimEpsilon {3}; + std::vector m_inputDimParam {3}; + std::vector m_outputDim {3}; + std::vector m_paramDim {}; + std::shared_ptr m_inputTensor {}; +}; + +void LayerNormBuilderTest::SetUp() {} + +void LayerNormBuilderTest::TearDown() {} + +void LayerNormBuilderTest::SaveNormAixsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + int32_t* beginNormAxisValue = new (std::nothrow) int32_t(1); + EXPECT_NE(nullptr, beginNormAxisValue); + std::shared_ptr normAxisTensor = TransToNNTensor(dataType, dim, quantParam, type); + normAxisTensor->SetBuffer(beginNormAxisValue, sizeof(int32_t)); + m_allTensors.emplace_back(normAxisTensor); +} + +void LayerNormBuilderTest::SaveEpsilonTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + float* epsilonValue = new (std::nothrow) float(0.0f); + EXPECT_NE(nullptr, epsilonValue); + std::shared_ptr transposeBTensor = TransToNNTensor(dataType, dim, quantParam, type); + transposeBTensor->SetBuffer(epsilonValue, sizeof(float)); + m_allTensors.emplace_back(transposeBTensor); +} + +void LayerNormBuilderTest::SaveParamAxisTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + int32_t* beginNormParamValue = new (std::nothrow) int32_t(1); + EXPECT_NE(nullptr, beginNormParamValue); + std::shared_ptr paramAxisTensor = TransToNNTensor(dataType, dim, quantParam, type); + paramAxisTensor->SetBuffer(beginNormParamValue, sizeof(int32_t)); + m_allTensors.emplace_back(paramAxisTensor); +} + +void LayerNormBuilderTest::SetInputTensor(std::shared_ptr inputTensor) +{ + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_inputDimNorm, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_inputDimEpsilon, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_inputDimParam, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); +} + +/** + * @tc.name: layernorm_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
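+ *
+ * The helpers above register the three LayerNorm parameters the builder expects
+ * (a brief summary of this fixture, not additional test logic):
+ *
+ *     OH_NN_LAYER_NORM_BEGIN_NORM_AXIS   -> OH_NN_INT32 scalar, buffer int32_t(1)
+ *     OH_NN_LAYER_NORM_EPSILON           -> OH_NN_FLOAT32 scalar, buffer float(0.0f)
+ *     OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS  -> OH_NN_INT32 scalar, buffer int32_t(1)
+ *
+ * The negative cases below change one of these attributes at a time (wrong data type,
+ * wrong dimension, unexpected parameter type, or missing buffer) and expect
+ * OH_NN_INVALID_PARAMETER.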
+ * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_001, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: layernorm_build_002 + * @tc.desc: Verify that the build function returns a failed message with duplicate Build(). + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_002, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: layernorm_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2, 3}; + m_outputs = {4}; + m_params = {5, 6, 7}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: layernorm_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_004, TestSize.Level0) +{ + m_outputs = {3, 4}; + m_params = {5, 6, 7}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: layernorm_build_005 + * @tc.desc: Verify that the build function returns a failed message with null allTensor. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: layernorm_build_006 + * @tc.desc: Verify that the build function returns a failed message with invalided allTensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_006, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: layernorm_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid beginNormAxis's dataType. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_007, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr normAxisTensor; + normAxisTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + float beginNormAxisValue = 1e-7; + normAxisTensor->SetBuffer(&beginNormAxisValue, sizeof(beginNormAxisValue)); + m_allTensors.emplace_back(normAxisTensor); + + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + normAxisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: layernorm_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid beginNormAxis's dimension. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_008, TestSize.Level0) +{ + std::vector expectParamDim = {2}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr normAxisTensor; + normAxisTensor = TransToNNTensor(OH_NN_INT32, expectParamDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + int32_t beginNormAxisValue[2] = {1, 2}; + normAxisTensor->SetBuffer(beginNormAxisValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(normAxisTensor); + + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + normAxisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: layernorm_build_009 + * @tc.desc: Verify that the build function returns a failed message with invalid epsilon's dataType. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_009, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + std::shared_ptr epsilonTensor; + epsilonTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + int32_t epsilonValue = 1; + epsilonTensor->SetBuffer(&epsilonValue, sizeof(epsilonValue)); + m_allTensors.emplace_back(epsilonTensor); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + epsilonTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: layernorm_build_010 + * @tc.desc: Verify that the build function returns a failed message with invalid epsilon's dimension. 
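+ *
+ * Scalar parameters are registered with the empty shape m_paramDim; this case instead gives
+ * epsilon a {2} shape and a two-float buffer, so Build is expected to reject the parameter
+ * dimension (a reading based on the fixture above). In outline:
+ *
+ *     auto epsilonTensor = TransToNNTensor(OH_NN_FLOAT32, {2}, nullptr, OH_NN_LAYER_NORM_EPSILON);
+ *     float epsilonValue[2] = {1e-7, 1e-7};
+ *     epsilonTensor->SetBuffer(epsilonValue, 2 * sizeof(float));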
+ * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_010, TestSize.Level0) +{ + std::vector expectParamDim = {2}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + std::shared_ptr epsilonTensor; + epsilonTensor = TransToNNTensor(OH_NN_FLOAT32, expectParamDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + float epsilonValue[2] = {1e-7, 1e-7}; + epsilonTensor->SetBuffer(epsilonValue, 2 * sizeof(float)); + m_allTensors.emplace_back(epsilonTensor); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + epsilonTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: layernorm_build_011 + * @tc.desc: Verify that the build function returns a failed message with invalid beginParamAxis's dataType. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_011, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + + std::shared_ptr paramAxisTensor; + paramAxisTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + float beginNormParamValue = 1; + paramAxisTensor->SetBuffer(&beginNormParamValue, sizeof(beginNormParamValue)); + m_allTensors.emplace_back(paramAxisTensor); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + paramAxisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: layernorm_build_012 + * @tc.desc: Verify that the build function returns a failed message with invalid beginParamAxis's dimension. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_012, TestSize.Level0) +{ + std::vector expectParamDim = {2}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + + std::shared_ptr paramAxisTensor; + paramAxisTensor = TransToNNTensor(OH_NN_INT32, expectParamDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + int32_t beginNormParamValue[2] = {1, 1}; + paramAxisTensor->SetBuffer(beginNormParamValue, 2 * sizeof(int32_t)); + m_allTensors.emplace_back(paramAxisTensor); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + paramAxisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: layernorm_build_0013 + * @tc.desc: Verify that the build function returns a failed message with invalid param. 
+ * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_0013, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_BATCH_NORM_EPSILON); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: layernorm_build_014 + * @tc.desc: Verify that the build function returns a failed message without set buffer for normAxis. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_014, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + std::shared_ptr normAxisTensor; + normAxisTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + m_allTensors.emplace_back(normAxisTensor); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: layernorm_build_015 + * @tc.desc: Verify that the build function returns a failed message without set buffer for epsilon. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_015, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + std::shared_ptr epsilonTensor; + epsilonTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + m_allTensors.emplace_back(epsilonTensor); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: layernorm_build_016 + * @tc.desc: Verify that the build function returns a failed message without set buffer for paramsAxis. 
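+ *
+ * Unlike SaveParamAxisTensor above, this case only calls TransToNNTensor and never
+ * SetBuffer, so the parameter tensor carries no value; Build is expected to return
+ * OH_NN_INVALID_PARAMETER. In outline:
+ *
+ *     auto paramAxisTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr,
+ *                                            OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS);
+ *     m_allTensors.emplace_back(paramAxisTensor);   // no SetBuffer() call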
+ * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_build_016, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + + std::shared_ptr paramAxisTensor; + paramAxisTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + m_allTensors.emplace_back(paramAxisTensor); + + OH_NN_ReturnCode ret = m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: layernorm_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_getprimitive_001, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveNormAixsTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_NORM_AXIS); + SaveEpsilonTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_EPSILON); + SaveParamAxisTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + + int32_t beginNormAxisValue = 1; + float epsilonValue = 0.0f; + int32_t beginNormParamValue = 1; + EXPECT_EQ(OH_NN_SUCCESS, m_layerNorm.Build(m_params, m_inputs, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_layerNorm.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + auto returnValue = mindspore::lite::MindIR_LayerNormFusion_GetBeginNormAxis(primitive.get()); + EXPECT_EQ(returnValue, beginNormAxisValue); + returnValue = mindspore::lite::MindIR_LayerNormFusion_GetEpsilon(primitive.get()); + EXPECT_EQ(returnValue, epsilonValue); + returnValue = mindspore::lite::MindIR_LayerNormFusion_GetBeginParamsAxis(primitive.get()); + EXPECT_EQ(returnValue, beginNormParamValue); +} + +/** + * @tc.name: layernorm_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(LayerNormBuilderTest, layernorm_getprimitive_002, TestSize.Level0) +{ + LayerNormBuilder layerNorm; + LiteGraphPrimitvePtr primitive = m_layerNorm.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/lessequal_builder_test.cpp b/test/unittest/ops/lessequal_builder_test.cpp new file mode 100644 index 0000000..6389818 --- /dev/null +++ b/test/unittest/ops/lessequal_builder_test.cpp @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/lessequal_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class LessEqualBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + LessEqualBuilder m_lessEqual; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {}; + std::vector m_inputDim {1, 2, 1, 1}; + std::vector m_outputDim {1, 2, 1, 1}; +}; + +void LessEqualBuilderTest::SetUp() {} + +void LessEqualBuilderTest::TearDown() {} + +/** + * @tc.name: lessequal_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_lessEqual.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: lessequal_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_lessEqual.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_lessEqual.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: lessequal_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2, 3}; + m_outputs = {4}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_lessEqual.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lessequal_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_build_004, TestSize.Level0) +{ + std::vector m_outputs = {2, 3, 4}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_lessEqual.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lessequal_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_lessEqual.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lessequal_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_lessEqual.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lessequal_build_007 + * @tc.desc: Verify that the build function returns a failed message with a virtual parameter. + * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_build_007, TestSize.Level0) +{ + std::vector m_params = {3}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + std::shared_ptr paramTensor; + paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_lessEqual.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: lessequal_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_lessEqual.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_lessEqual.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: lessequal_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(LessEqualBuilderTest, lessequal_getprimitive_002, TestSize.Level0) +{ + LessEqualBuilder lessEqual; + LiteGraphPrimitvePtr primitive = m_lessEqual.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/matmul_builder_test.cpp b/test/unittest/ops/matmul_builder_test.cpp new file mode 100644 index 0000000..4af6c95 --- /dev/null +++ b/test/unittest/ops/matmul_builder_test.cpp @@ -0,0 +1,483 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/matmul_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class MatMulBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveTransposeATensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveTransposeBTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveActivationTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetInputTensor(std::shared_ptr inputTensor); + +protected: + MatmulBuilder m_matmul; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3, 4, 5}; + std::vector m_inputXDim {1, 1, 3, 2}; + std::vector m_inputYDim {1, 1, 2, 3}; + std::vector m_outputDim {1, 1, 3, 3}; + std::vector m_paramDim {}; + std::shared_ptr m_inputTensor {}; +}; + +void MatMulBuilderTest::SetUp() {} + +void MatMulBuilderTest::TearDown() {} + +void MatMulBuilderTest::SaveTransposeATensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr transposeATensor = TransToNNTensor(dataType, dim, quantParam, type); + bool* transposeAValue = new (std::nothrow) bool(false); + EXPECT_NE(nullptr, transposeAValue); + transposeATensor->SetBuffer(transposeAValue, sizeof(bool)); + m_allTensors.emplace_back(transposeATensor); +} + +void MatMulBuilderTest::SaveTransposeBTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr transposeBTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool* transposeBValue = new (std::nothrow) bool(false); + EXPECT_NE(nullptr, transposeBValue); + transposeBTensor->SetBuffer(transposeBValue, sizeof(bool)); + m_allTensors.emplace_back(transposeBTensor); +} + +void MatMulBuilderTest::SaveActivationTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr activationTensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* activationValue = new (std::nothrow) int8_t(0); + EXPECT_NE(nullptr, activationValue); + activationTensor->SetBuffer(activationValue, sizeof(int8_t)); + m_allTensors.emplace_back(activationTensor); +} + +void MatMulBuilderTest::SetInputTensor(std::shared_ptr inputTensor) +{ + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_inputXDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); + + inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_inputYDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(inputTensor); +} +/** + * @tc.name: matmul_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
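+ *
+ * The helpers above register the three Matmul parameters the builder expects
+ * (a short summary of this fixture, not additional test logic):
+ *
+ *     OH_NN_MATMUL_TRANSPOSE_A      -> OH_NN_BOOL scalar, buffer bool(false)
+ *     OH_NN_MATMUL_TRANSPOSE_B      -> OH_NN_BOOL scalar, buffer bool(false)
+ *     OH_NN_MATMUL_ACTIVATION_TYPE  -> OH_NN_INT8 scalar, buffer int8_t(0)
+ *
+ * The negative cases below change one attribute at a time (data type, dimension, value,
+ * parameter type, or missing buffer) and expect OH_NN_INVALID_PARAMETER.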
+ * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_001, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: matmul_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_002, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + EXPECT_EQ(OH_NN_SUCCESS, m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: matmul_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + m_params = {4, 5, 6}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: matmul_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + m_params = {4, 5, 6}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: matmul_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: matmul_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_006, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: matmul_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid transposeA's dataType. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_007, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + std::shared_ptr transposeATensor; + transposeATensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + int32_t transposeAValue = 1; + transposeATensor->SetBuffer(&transposeAValue, sizeof(transposeAValue)); + m_allTensors.emplace_back(transposeATensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + transposeATensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: matmul_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid transposeA's dimension. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_008, TestSize.Level0) +{ + std::vector expectParamDim = {2}; + + SetInputTensor(m_inputTensor); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + std::shared_ptr transposeATensor; + transposeATensor = TransToNNTensor(OH_NN_BOOL, expectParamDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + bool transposeAValue[2] = {false, false}; + transposeATensor->SetBuffer(transposeAValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(transposeATensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + transposeATensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: matmul_build_009 + * @tc.desc: Verify that the build function returns a failed message with invalid transposeB's dataType. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_009, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + std::shared_ptr transposeBTensor; + transposeBTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + int32_t transposeBValue = 1; + transposeBTensor->SetBuffer(&transposeBValue, sizeof(transposeBValue)); + m_allTensors.emplace_back(transposeBTensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + transposeBTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: matmul_build_010 + * @tc.desc: Verify that the build function returns a failed message with invalid transposeB's dimension. 
+ * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matMul_build_010, TestSize.Level0) +{ + std::vector expectParamDim = {2}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + std::shared_ptr transposeBTensor; + transposeBTensor = TransToNNTensor(OH_NN_BOOL, expectParamDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + bool transposeBValue[2] = {false, false}; + transposeBTensor->SetBuffer(transposeBValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(transposeBTensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + transposeBTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: matmul_build_011 + * @tc.desc: Verify that the build function returns a failed message with invalid activation's dataType. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matMul_build_011, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + + std::shared_ptr activationTensor; + activationTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + bool activationValue = false; + activationTensor->SetBuffer(&activationValue, sizeof(activationValue)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: matmul_build_012 + * @tc.desc: Verify that the build function returns a failed message with invalid activation's dimension. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_012, TestSize.Level0) +{ + std::vector expectParamDim = {2}; + + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + + std::shared_ptr activationTensor; + activationTensor = TransToNNTensor(OH_NN_INT8, expectParamDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + int8_t activationValue[2] = {0, 1}; + activationTensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: matmul_build_013 + * @tc.desc: Verify that the build function returns a failed message with invalid activation's data. 
+ * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_013, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + + std::shared_ptr activationTensor; + activationTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + int8_t activationValue = -1; + activationTensor->SetBuffer(&activationValue, sizeof(activationValue)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: matmul_build_014 + * @tc.desc: Verify that the build function returns a failed message with passing invalid param. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_014, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: matmul_build_015 + * @tc.desc: Verify that the build function returns a failed message without set buffer for transposeA. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_015, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + std::shared_ptr transposeATensor; + transposeATensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + m_allTensors.emplace_back(transposeATensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: matmul_build_016 + * @tc.desc: Verify that the build function returns a failed message without set buffer for transposeB. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_016, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + std::shared_ptr transposeBTensor; + transposeBTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + m_allTensors.emplace_back(transposeBTensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: matmul_build_017 + * @tc.desc: Verify that the build function returns a failed message without set buffer for activation. 
+ * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_build_017, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + + std::shared_ptr activationTensor; + activationTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: matmul_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_getprimitive_001, TestSize.Level0) +{ + SetInputTensor(m_inputTensor); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveTransposeATensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_A); + SaveTransposeBTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_MATMUL_TRANSPOSE_B); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + bool transposeAValue = false; + bool transposeBValue = false; + int8_t activationValue = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_matmul.Build(m_params, m_inputs, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_matmul.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_MatMulFusion_GetTransposeA(primitive.get()); + EXPECT_EQ(returnValue, transposeAValue); + returnValue = mindspore::lite::MindIR_MatMulFusion_GetTransposeB(primitive.get()); + EXPECT_EQ(returnValue, transposeBValue); + returnValue = mindspore::lite::MindIR_MatMulFusion_GetActivationType(primitive.get()); + EXPECT_EQ(returnValue, activationValue); +} + +/** + * @tc.name: matmul_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(MatMulBuilderTest, matmul_getprimitive_002, TestSize.Level0) +{ + MatmulBuilder matmul; + LiteGraphPrimitvePtr primitive = m_matmul.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/maximum_builder_test.cpp b/test/unittest/ops/maximum_builder_test.cpp new file mode 100644 index 0000000..1e038af --- /dev/null +++ b/test/unittest/ops/maximum_builder_test.cpp @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "frameworks/native/ops/maximum_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class MaximumBuilderTest : public OpsTest {
+public:
+    void SetUp() override;
+    void TearDown() override;
+
+protected:
+    MaximumBuilder m_maximum;
+    std::vector<uint32_t> m_inputs {0, 1};
+    std::vector<uint32_t> m_outputs {2};
+    std::vector<uint32_t> m_params {};
+    std::vector<int32_t> m_inputDim {1, 3, 1, 1};
+    std::vector<int32_t> m_outputDim {1, 3, 1, 1};
+};
+
+void MaximumBuilderTest::SetUp() {}
+
+void MaximumBuilderTest::TearDown() {}
+
+/**
+ * @tc.name: maximum_build_001
+ * @tc.desc: Verify that the build function returns a successful message.
+ * @tc.type: FUNC
+ */
+HWTEST_F(MaximumBuilderTest, maximum_build_001, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+
+    OH_NN_ReturnCode ret = m_maximum.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/**
+ * @tc.name: maximum_build_002
+ * @tc.desc: Verify that the build function returns a failed message with true m_isBuild.
+ * @tc.type: FUNC
+ */
+HWTEST_F(MaximumBuilderTest, maximum_build_002, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_maximum.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors));
+    OH_NN_ReturnCode ret = m_maximum.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: maximum_build_003
+ * @tc.desc: Verify that the build function returns a failed message with invalid input.
+ * @tc.type: FUNC
+ */
+HWTEST_F(MaximumBuilderTest, maximum_build_003, TestSize.Level0)
+{
+    m_inputs = {0, 1, 2};
+    m_outputs = {3};
+
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+
+    OH_NN_ReturnCode ret = m_maximum.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: maximum_build_004
+ * @tc.desc: Verify that the build function returns a failed message with invalid output.
+ * @tc.type: FUNC
+ */
+HWTEST_F(MaximumBuilderTest, maximum_build_004, TestSize.Level0)
+{
+    m_outputs = {2, 3, 4};
+
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+
+    OH_NN_ReturnCode ret = m_maximum.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: maximum_build_005
+ * @tc.desc: Verify that the build function returns a failed message with empty allTensor.
+ * @tc.type: FUNC
+ */
+HWTEST_F(MaximumBuilderTest, maximum_build_005, TestSize.Level0)
+{
+    OH_NN_ReturnCode ret = m_maximum.Build(m_params, m_inputs, m_outputs, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: maximum_build_006
+ * @tc.desc: Verify that the build function returns a failed message without output tensor.
+ * @tc.type: FUNC + */ +HWTEST_F(MaximumBuilderTest, maximum_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_maximum.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: maximum_build_007 + * @tc.desc: Verify that the build function returns a failed message with a virtual parameter. + * @tc.type: FUNC + */ +HWTEST_F(MaximumBuilderTest, maximum_build_007, TestSize.Level0) +{ + std::vector m_params = {3}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr paramTensor; + paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_maximum.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: maximum_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(MaximumBuilderTest, maximum_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_maximum.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_maximum.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: maximum_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(MaximumBuilderTest, maximum_getprimitive_002, TestSize.Level0) +{ + MaximumBuilder maximum; + LiteGraphPrimitvePtr primitive = m_maximum.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/maxpool_pad_test.cpp b/test/unittest/ops/maxpool_pad_test.cpp new file mode 100644 index 0000000..a331abf --- /dev/null +++ b/test/unittest/ops/maxpool_pad_test.cpp @@ -0,0 +1,432 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "frameworks/native/ops/maxpool_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class MaxPoolPadBuilderTest : public OpsTest {
+public:
+    void SetUp() override;
+    void TearDown() override;
+
+    void SetPad(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+    void SetPadParam();
+
+public:
+    MaxPoolBuilder m_builder;
+    std::vector<uint32_t> m_inputs{0};
+    std::vector<uint32_t> m_outputs{1};
+    std::vector<uint32_t> m_params{2, 3, 4, 5};
+    std::vector<int32_t> m_input_dim{1, 3, 3, 1};
+    std::vector<int32_t> m_output_dim{1, 2, 2, 1};
+    std::vector<int32_t> m_kenelsize_dim{2};
+    std::vector<int32_t> m_stride_dim{2};
+    std::vector<int32_t> m_pad_dim{4};
+    std::vector<int32_t> m_param_dim{};
+};
+
+void MaxPoolPadBuilderTest::SetUp() {}
+
+void MaxPoolPadBuilderTest::TearDown() {}
+
+void MaxPoolPadBuilderTest::SetPad(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    int32_t padNum{4};
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int64_t* padValue = new (std::nothrow) int64_t[padNum]{0, 0, 0, 0};
+    EXPECT_NE(nullptr, padValue);
+
+    tensor->SetBuffer(padValue, sizeof(int64_t) * padNum);
+    m_allTensors.emplace_back(tensor);
+}
+
+void MaxPoolPadBuilderTest::SetPadParam()
+{
+    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
+    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
+    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
+    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
+}
+
+/**
+ * @tc.name: maxpool_build_pad_001
+ * @tc.desc: Verify the success of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_001, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetPadParam();
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: maxpool_build_pad_002
+ * @tc.desc: Verify that building twice is forbidden by the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_002, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetPadParam();
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: maxpool_build_pad_003
+ * @tc.desc: Verify the missing input of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_003, TestSize.Level1)
+{
+    m_inputs = {};
+    m_outputs = {0};
+    m_params = {1, 2, 3, 4};
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetPadParam();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: maxpool_build_pad_004
+ * @tc.desc: Verify the missing output of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_004, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {}; + m_params = {1, 2, 3, 4}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_005, TestSize.Level1) +{ + m_inputs = {6}; + m_outputs = {1}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_006, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {6}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetPadParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_007 + * @tc.desc: Verify the invalid kernelSize of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_007, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + int32_t kernelsNum{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_kenelsize_dim, nullptr, + OH_NN_MAX_POOL_KERNEL_SIZE); + int32_t* valueKernelSize = new (std::nothrow) int32_t[kernelsNum]{1, 1}; + EXPECT_NE(nullptr, valueKernelSize); + + tensor->SetBuffer(valueKernelSize, sizeof(int32_t) * kernelsNum); + m_allTensors.emplace_back(tensor); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + m_paramsIndex = m_params; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_008 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_008, TestSize.Level1) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + m_paramsIndex = m_params; + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + int32_t strideNum{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + int32_t* strideValue = new (std::nothrow) int32_t[strideNum]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, sizeof(int32_t) * strideNum); + m_allTensors.emplace_back(tensor); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + SetActivation(OH_NN_INT8, 
m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_009 + * @tc.desc: Verify the invalid pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + int32_t padNum{4}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + int32_t* padValue = new (std::nothrow) int32_t[padNum]{0, 0, 0, 0}; + EXPECT_NE(nullptr, padValue); + + tensor->SetBuffer(padValue, sizeof(int32_t) * padNum); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + + +/** + * @tc.name: maxpool_build_pad_010 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_MAX_POOL_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_011 + * @tc.desc: Verify the activation scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_011, TestSize.Level1) +{ + m_param_dim = {2}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + int8_t* activationValue = new (std::nothrow) int8_t[2]{1, 2}; + EXPECT_NE(nullptr, activationValue); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_MAX_POOL_ACTIVATION_TYPE); + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_012 + * @tc.desc: Verify the maxpool without set kernelsize of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_012, TestSize.Level1) +{ + m_paramsIndex = m_params; + 
SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_kenelsize_dim, nullptr, + OH_NN_MAX_POOL_KERNEL_SIZE); + m_allTensors.emplace_back(tensor); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_013 + * @tc.desc: Verify the maxpool without set stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_013, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + m_allTensors.emplace_back(tensor); + + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_014 + * @tc.desc: Verify the maxpool without set pad of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_014, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + m_allTensors.emplace_back(tensor); + + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_015 + * @tc.desc: Verify the maxpool without set activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_015, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD); + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_MAX_POOL_ACTIVATION_TYPE); + m_allTensors.emplace_back(tensor); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_getprimitive_pad_001 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolPadBuilderTest, maxpool_getprimitive_pad_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); 
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetPadParam();
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
+    EXPECT_NE(expectPrimitive, primitive);
+
+    std::vector<int64_t> expectKernelSize = mindspore::lite::MindIR_MaxPoolFusion_GetKernelSize(primitive.get());
+    std::vector<int64_t> kernelSizeValueTest{1, 1};
+    EXPECT_EQ(kernelSizeValueTest, expectKernelSize);
+
+    std::vector<int64_t> expectStrides = mindspore::lite::MindIR_MaxPoolFusion_GetStrides(primitive.get());
+    std::vector<int64_t> strideValueTest{1, 1};
+    EXPECT_EQ(strideValueTest, expectStrides);
+
+    std::vector<int64_t> expectPadValue = mindspore::lite::MindIR_MaxPoolFusion_GetPad(primitive.get());
+    std::vector<int64_t> padValueValueTest{0, 0, 0, 0};
+    EXPECT_EQ(padValueValueTest, expectPadValue);
+
+    int8_t activationValue = 0;
+    int expectActivation = mindspore::lite::MindIR_MaxPoolFusion_GetActivationType(primitive.get());
+    EXPECT_EQ(activationValue, expectActivation);
+}
+
+/**
+ * @tc.name: maxpool_getprimitive_pad_002
+ * @tc.desc: Verify the behavior of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(MaxPoolPadBuilderTest, maxpool_getprimitive_pad_002, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetPadParam();
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
+    EXPECT_EQ(expectPrimitive, primitive);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/test/unittest/ops/maxpool_padmode_test.cpp b/test/unittest/ops/maxpool_padmode_test.cpp
new file mode 100644
index 0000000..799edca
--- /dev/null
+++ b/test/unittest/ops/maxpool_padmode_test.cpp
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "frameworks/native/ops/maxpool_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class MaxPoolBuilderTest : public OpsTest {
+public:
+    void SetUp() override;
+    void TearDown() override;
+
+    void SetPadMode(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+    void SetParam();
+
+public:
+    MaxPoolBuilder m_builder;
+    std::vector<uint32_t> m_inputs{0};
+    std::vector<uint32_t> m_outputs{1};
+    std::vector<uint32_t> m_params{2, 3, 4, 5};
+    std::vector<int32_t> m_input_dim{1, 3, 3, 1};
+    std::vector<int32_t> m_output_dim{1, 2, 2, 1};
+    std::vector<int32_t> m_kenelsize_dim{2};
+    std::vector<int32_t> m_stride_dim{2};
+    std::vector<int32_t> m_param_dim{};
+};
+
+void MaxPoolBuilderTest::SetUp() {}
+
+void MaxPoolBuilderTest::TearDown() {}
+
+void MaxPoolBuilderTest::SetPadMode(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int8_t* padModeValue = new (std::nothrow) int8_t(0);
+    EXPECT_NE(nullptr, padModeValue);
+    tensor->SetBuffer(padModeValue, sizeof(int8_t));
+    m_allTensors.emplace_back(tensor);
+}
+
+void MaxPoolBuilderTest::SetParam()
+{
+    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
+    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
+    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE);
+    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
+}
+
+/**
+ * @tc.name: maxpool_build_pad_mode_001
+ * @tc.desc: Verify the success of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_001, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    SetParam();
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: maxpool_build_pad_mode_002
+ * @tc.desc: Verify that building twice is forbidden by the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_002, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    SetParam();
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: maxpool_build_pad_mode_003
+ * @tc.desc: Verify the missing input of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_003, TestSize.Level1)
+{
+    m_inputs = {};
+    m_outputs = {0};
+    m_params = {1, 2, 3, 4};
+    m_paramsIndex = m_params;
+
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+    SetParam();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+}
+
+/**
+ * @tc.name: maxpool_build_pad_mode_004
+ * @tc.desc: Verify the missing output of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_004,
TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {}; + m_params = {1, 2, 3, 4}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_mode_005 + * @tc.desc: Verify the inputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_005, TestSize.Level1) +{ + m_inputs = {6}; + m_outputs = {1}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_mode_006 + * @tc.desc: Verify the outputIndex out of bounds of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_006, TestSize.Level1) +{ + m_inputs = {0}; + m_outputs = {6}; + m_params = {2, 3, 4, 5}; + m_paramsIndex = m_params; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetParam(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_mode_007 + * @tc.desc: Verify the invalid kernelSize of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_007, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + int32_t kernelsNum{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_kenelsize_dim, nullptr, + OH_NN_MAX_POOL_KERNEL_SIZE); + int32_t* kernelSizeValue = new (std::nothrow) int32_t[kernelsNum]{1, 1}; + EXPECT_NE(nullptr, kernelSizeValue); + tensor->SetBuffer(kernelSizeValue, sizeof(int32_t) * kernelsNum); + m_allTensors.emplace_back(tensor); + + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_mode_008 + * @tc.desc: Verify the invalid stride of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_008, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + int32_t strideNum{2}; + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + int32_t* strideValue = new (std::nothrow) int32_t[strideNum]{1, 1}; + EXPECT_NE(nullptr, strideValue); + + tensor->SetBuffer(strideValue, sizeof(int32_t) * strideNum); + m_allTensors.emplace_back(tensor); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, 
OH_NN_MAX_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_mode_009 + * @tc.desc: Verify the invalid padmode of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_009, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + int32_t *padValueTest = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, padValueTest); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD); + tensor->SetBuffer(padValueTest, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + + +/** + * @tc.name: maxpool_build_pad_mode_010 + * @tc.desc: Verify the invalid activation of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_010, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, + OH_NN_MAX_POOL_ACTIVATION_TYPE); + int32_t* activationValue = new (std::nothrow) int32_t(0); + EXPECT_NE(nullptr, activationValue); + + tensor->SetBuffer(activationValue, sizeof(int32_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_build_pad_mode_011 + * @tc.desc: Verify the scalar length of the build function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_011, TestSize.Level1) +{ + m_param_dim = {2}; + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr); + + SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE); + SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE); + SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE); + int8_t* activationValue = new (std::nothrow) int8_t[2]{1, 2}; + EXPECT_NE(nullptr, activationValue); + + std::shared_ptr tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, + OH_NN_MAX_POOL_ACTIVATION_TYPE); + tensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: maxpool_getprimitive_pad_mode_001 + * @tc.desc: Verify the behavior of the GetPrimitive function + * @tc.type: FUNC + */ +HWTEST_F(MaxPoolBuilderTest, maxpool_getprimitive_pad_mode_001, TestSize.Level1) +{ + m_paramsIndex = m_params; + SaveInputTensor(m_inputs, 
OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetParam();
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
+    EXPECT_NE(expectPrimitive, primitive);
+
+    std::vector<int64_t> returnKernelSize = mindspore::lite::MindIR_MaxPoolFusion_GetKernelSize(primitive.get());
+    std::vector<int64_t> kernelSizeValueTest{1, 1};
+    EXPECT_EQ(kernelSizeValueTest, returnKernelSize);
+
+    std::vector<int64_t> returnStrides = mindspore::lite::MindIR_MaxPoolFusion_GetStrides(primitive.get());
+    std::vector<int64_t> strideValueTest{1, 1};
+    EXPECT_EQ(strideValueTest, returnStrides);
+
+    int returnPadMode = mindspore::lite::MindIR_MaxPoolFusion_GetPadMode(primitive.get());
+    EXPECT_EQ(1, returnPadMode);
+
+    int returnActivation = mindspore::lite::MindIR_MaxPoolFusion_GetActivationType(primitive.get());
+    EXPECT_EQ(0, returnActivation);
+}
+
+/**
+ * @tc.name: maxpool_getprimitive_pad_mode_002
+ * @tc.desc: Verify the behavior of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(MaxPoolBuilderTest, maxpool_getprimitive_pad_mode_002, TestSize.Level1)
+{
+    m_paramsIndex = m_params;
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
+
+    SetParam();
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
+    EXPECT_EQ(expectPrimitive, primitive);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/test/unittest/ops/mul_builder_test.cpp b/test/unittest/ops/mul_builder_test.cpp
new file mode 100644
index 0000000..de6c0e1
--- /dev/null
+++ b/test/unittest/ops/mul_builder_test.cpp
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "frameworks/native/ops/mul_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class MulBuilderTest : public OpsTest {
+public:
+    void SetUp() override;
+    void TearDown() override;
+
+protected:
+    void SaveParamsTensor(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+
+protected:
+    MulBuilder m_mul;
+    std::vector<uint32_t> m_inputs {0, 1};
+    std::vector<uint32_t> m_outputs {2};
+    std::vector<uint32_t> m_params {3};
+    std::vector<int32_t> m_inputDim {1, 2, 2, 1};
+    std::vector<int32_t> m_outputDim {1, 2, 2, 1};
+    std::vector<int32_t> m_paramDim {};
+};
+
+void MulBuilderTest::SetUp() {}
+
+void MulBuilderTest::TearDown() {}
+
+void MulBuilderTest::SaveParamsTensor(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    std::shared_ptr<NNTensor> activationTensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int8_t* activationValue = new (std::nothrow) int8_t(0);
+    EXPECT_NE(nullptr, activationValue);
+    activationTensor->SetBuffer(activationValue, sizeof(int8_t));
+    m_allTensors.emplace_back(activationTensor);
+}
+
+/**
+ * @tc.name: mul_build_001
+ * @tc.desc: Verify that the build function returns a successful message.
+ * @tc.type: FUNC
+ */
+HWTEST_F(MulBuilderTest, mul_build_001, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+    SaveParamsTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE);
+
+    OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/**
+ * @tc.name: mul_build_002
+ * @tc.desc: Verify that the build function returns a failed message with true m_isBuild.
+ * @tc.type: FUNC
+ */
+HWTEST_F(MulBuilderTest, mul_build_002, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+    SaveParamsTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors));
+    OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: mul_build_003
+ * @tc.desc: Verify that the build function returns a failed message with invalid input.
+ * @tc.type: FUNC
+ */
+HWTEST_F(MulBuilderTest, mul_build_003, TestSize.Level0)
+{
+    m_inputs = {0, 1, 2};
+    m_outputs = {3};
+    m_params = {4};
+
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+    SaveParamsTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE);
+
+    OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: mul_build_004
+ * @tc.desc: Verify that the build function returns a failed message with invalid output.
+ * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: mul_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: mul_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: mul_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid activation's dataType. + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_007, TestSize.Level0) +{ + m_params = {3}; + std::vector m_paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr activationTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_MUL_ACTIVATION_TYPE); + float activationValue = 1e-7; + activationTensor->SetBuffer(&activationValue, sizeof(activationValue)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: mul_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid activation's dimension. + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_008, TestSize.Level0) +{ + m_paramDim = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr activationTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, + nullptr, OH_NN_MUL_ACTIVATION_TYPE); + int8_t activationValue[2] = {0, 1}; + activationTensor->SetBuffer(activationValue, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: mul_build_009 + * @tc.desc: Verify that the build function returns a failed message with invalid activation's data. 
+ * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr activationTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, + nullptr, OH_NN_MUL_ACTIVATION_TYPE); + int8_t activationValue = -1; + activationTensor->SetBuffer(&activationValue, sizeof(activationValue)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: mul_build_010 + * @tc.desc: Verify that the build function returns a failed message with passing invalid param. + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_010, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MATMUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: mul_build_011 + * @tc.desc: Verify that the build function returns a failed message without set buffer for activation. + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_build_011, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr activationTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, + nullptr, OH_NN_MUL_ACTIVATION_TYPE); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: mul_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + int8_t activationValue = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_mul.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_mul.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_MulFusion_GetActivationType(primitive.get()); + EXPECT_EQ(returnValue, activationValue); +} + +/** + * @tc.name: mul_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(MulBuilderTest, mul_getprimitive_002, TestSize.Level0) +{ + MulBuilder mul; + LiteGraphPrimitvePtr primitive = m_mul.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/onehot_builder_test.cpp b/test/unittest/ops/onehot_builder_test.cpp new file mode 100644 index 0000000..16832ff --- /dev/null +++ b/test/unittest/ops/onehot_builder_test.cpp @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "frameworks/native/ops/onehot_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class OneHotBuilderTest : public OpsTest {
+public:
+    void SetUp() override;
+    void TearDown() override;
+
+protected:
+    void SaveParamsTensor(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+
+protected:
+    OnehotBuilder m_oneHot;
+    std::vector<uint32_t> m_inputs {0, 1, 2, 3};
+    std::vector<uint32_t> m_outputs {4};
+    std::vector<uint32_t> m_params {5};
+    std::vector<int32_t> m_inputDim {3};
+    std::vector<int32_t> m_outputDim {3, 3};
+    std::vector<int32_t> m_paramDim {};
+};
+
+void OneHotBuilderTest::SetUp() {}
+
+void OneHotBuilderTest::TearDown() {}
+
+void OneHotBuilderTest::SaveParamsTensor(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    std::shared_ptr<NNTensor> axisTensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int64_t* axisValue = new (std::nothrow) int64_t(-1);
+    EXPECT_NE(nullptr, axisValue);
+    axisTensor->SetBuffer(axisValue, sizeof(int64_t));
+    m_allTensors.emplace_back(axisTensor);
+}
+
+/**
+ * @tc.name: onehot_build_001
+ * @tc.desc: Verify that the build function returns a successful message.
+ * @tc.type: FUNC
+ */
+HWTEST_F(OneHotBuilderTest, onehot_build_001, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+    SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ONE_HOT_AXIS);
+
+    OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/**
+ * @tc.name: onehot_build_002
+ * @tc.desc: Verify that the build function returns a failed message with true m_isBuild.
+ * @tc.type: FUNC
+ */
+HWTEST_F(OneHotBuilderTest, onehot_build_002, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+    SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ONE_HOT_AXIS);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors));
+    OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: onehot_build_003
+ * @tc.desc: Verify that the build function returns a failed message with invalid input.
+ * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2, 3, 4}; + m_outputs = {5}; + m_params = {6}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ONE_HOT_AXIS); + + OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: onehot_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_build_004, TestSize.Level0) +{ + m_outputs = {4, 5}; + m_params = {6}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ONE_HOT_AXIS); + + OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: onehot_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: onehot_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: onehot_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid axis's dataType. + * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_ONE_HOT_AXIS); + float axisValue = 1e-7; + axisTensor->SetBuffer(&axisValue, sizeof(axisValue)); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + axisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: onehot_build_008 + * @tc.desc: Verify that the build function returns a failed message with passing invalid param. + * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_build_008, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: onehot_build_009 + * @tc.desc: Verify that the build function returns a failed message without set buffer for axis. 
+ * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_ONE_HOT_AXIS); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: onehot_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_ONE_HOT_AXIS); + + int64_t axisValue = -1; + EXPECT_EQ(OH_NN_SUCCESS, m_oneHot.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_oneHot.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_OneHot_GetAxis(primitive.get()); + EXPECT_EQ(returnValue, axisValue); +} + +/** + * @tc.name: onehot_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(OneHotBuilderTest, onehot_getprimitive_002, TestSize.Level0) +{ + OnehotBuilder oneHot; + LiteGraphPrimitvePtr primitive = m_oneHot.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/ops_test.cpp b/test/unittest/ops/ops_test.cpp new file mode 100644 index 0000000..2a800a0 --- /dev/null +++ b/test/unittest/ops/ops_test.cpp @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "ops_test.h"
+
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+using namespace std;
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+void OpsTest::SaveInputTensor(const std::vector<uint32_t>& inputsIndex, OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam)
+{
+    m_inputsIndex = inputsIndex;
+    for (size_t i = 0; i < inputsIndex.size(); ++i) {
+        std::shared_ptr<NNTensor> inputTensor;
+        inputTensor = TransToNNTensor(dataType, dim, quantParam, OH_NN_TENSOR);
+        m_allTensors.emplace_back(inputTensor);
+    }
+}
+
+void OpsTest::SaveOutputTensor(const std::vector<uint32_t>& outputsIndex, OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam)
+{
+    m_outputsIndex = outputsIndex;
+    for (size_t i = 0; i < outputsIndex.size(); ++i) {
+        std::shared_ptr<NNTensor> outputTensor;
+        outputTensor = TransToNNTensor(dataType, dim, quantParam, OH_NN_TENSOR);
+        m_allTensors.emplace_back(outputTensor);
+    }
+}
+
+void OpsTest::SetKernelSize(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    int32_t kernelsNum{2};
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int64_t* kernelSizeValue = new (std::nothrow) int64_t[kernelsNum]{1, 1};
+    EXPECT_NE(nullptr, kernelSizeValue);
+    tensor->SetBuffer(kernelSizeValue, sizeof(int64_t) * kernelsNum);
+    m_allTensors.emplace_back(tensor);
+}
+
+void OpsTest::SetStride(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    int32_t strideNum{2};
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int64_t* strideValue = new (std::nothrow) int64_t[strideNum]{1, 1};
+    EXPECT_NE(nullptr, strideValue);
+    tensor->SetBuffer(strideValue, sizeof(int64_t) * strideNum);
+    m_allTensors.emplace_back(tensor);
+}
+
+void OpsTest::SetActivation(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int8_t* activationValue = new (std::nothrow) int8_t(0);
+    EXPECT_NE(nullptr, activationValue);
+    tensor->SetBuffer(activationValue, sizeof(int8_t));
+    m_allTensors.emplace_back(tensor);
+}
+
+void OpsTest::SetDilation(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    int32_t dilationNum = 2;
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int64_t* dilationValue = new (std::nothrow) int64_t[2]{1, 1};
+    EXPECT_NE(nullptr, dilationValue);
+    tensor->SetBuffer(dilationValue, dilationNum * sizeof(int64_t));
+    m_allTensors.emplace_back(tensor);
+}
+
+void OpsTest::SetGroup(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int64_t* groupValue = new (std::nothrow) int64_t(0);
+    EXPECT_NE(nullptr, groupValue);
+    tensor->SetBuffer(groupValue, sizeof(int64_t));
+    m_allTensors.emplace_back(tensor);
+}
+
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/test/unittest/ops/ops_test.h b/test/unittest/ops/ops_test.h
new file mode 100644
index 0000000..a352218
--- /dev/null
+++ b/test/unittest/ops/ops_test.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NEURAL_NETWORK_RUNTIME_OPS_TEST_H
+#define NEURAL_NETWORK_RUNTIME_OPS_TEST_H
+
+#include
+
+#include "mindir.h"
+
+#include "frameworks/native/nn_tensor.h"
+#include "test/unittest/common/base_test.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class OpsTest : public BaseTest {
+public:
+    OpsTest() = default;
+    virtual void SaveInputTensor(const std::vector<uint32_t>& inputsIndex, OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam);
+    virtual void SaveOutputTensor(const std::vector<uint32_t>& outputsIndex, OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam);
+    virtual void InitTensor(const std::vector<uint32_t>& inputsIndex, const std::vector<uint32_t>& outputsIndex) {};
+
+    void SetKernelSize(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+    void SetStride(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+    void SetActivation(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+    void SetDilation(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+    void SetGroup(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+
+public:
+    std::vector<uint32_t> m_inputsIndex {};
+    std::vector<uint32_t> m_outputsIndex {};
+    std::vector<uint32_t> m_paramsIndex {};
+    std::vector<std::shared_ptr<NNTensor>> m_allTensors;
+};
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
+#endif // NEURAL_NETWORK_RUNTIME_OPS_TEST_H
diff --git a/test/unittest/ops/pad_builder_test.cpp b/test/unittest/ops/pad_builder_test.cpp
new file mode 100644
index 0000000..402d545
--- /dev/null
+++ b/test/unittest/ops/pad_builder_test.cpp
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "frameworks/native/ops/pad_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class PadBuilderTest : public OpsTest {
+public:
+    void SetUp() override;
+    void TearDown() override;
+
+protected:
+    void SaveParamsTensor(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+
+protected:
+    PadBuilder m_pad;
+    std::vector<uint32_t> m_inputs {0, 1};
+    std::vector<uint32_t> m_outputs {2};
+    std::vector<uint32_t> m_params {3};
+    std::vector<int32_t> m_inputDim {1, 1, 2, 3};
+    std::vector<int32_t> m_outputDim {1, 2, 7, 7};
+    std::vector<int32_t> m_paramDim {};
+};
+
+void PadBuilderTest::SetUp() {}
+
+void PadBuilderTest::TearDown() {}
+
+void PadBuilderTest::SaveParamsTensor(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    std::shared_ptr<NNTensor> constantValueTensor = TransToNNTensor(dataType, dim, quantParam, type);
+    float* constantValue = new (std::nothrow) float(2.0);
+    EXPECT_NE(nullptr, constantValue);
+    constantValueTensor->SetBuffer(constantValue, sizeof(float));
+    m_allTensors.emplace_back(constantValueTensor);
+}
+
+/**
+ * @tc.name: pad_build_001
+ * @tc.desc: Verify that the build function returns a successful message.
+ * @tc.type: FUNC
+ */
+HWTEST_F(PadBuilderTest, pad_build_001, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+    SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE);
+
+    OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/**
+ * @tc.name: pad_build_002
+ * @tc.desc: Verify that the build function returns a failed message when m_isBuild is true.
+ * @tc.type: FUNC
+ */
+HWTEST_F(PadBuilderTest, pad_build_002, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+    SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors));
+    OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: pad_build_003
+ * @tc.desc: Verify that the build function returns a failed message with invalid input.
+ * @tc.type: FUNC
+ */
+HWTEST_F(PadBuilderTest, pad_build_003, TestSize.Level0)
+{
+    m_inputs = {0, 1, 2};
+    m_outputs = {3};
+    m_params = {4};
+
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
+    SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE);
+
+    OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: pad_build_004
+ * @tc.desc: Verify that the build function returns a failed message with invalid output.
+ * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pad_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pad_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pad_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid constant's dataType. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr constantValueTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, + nullptr, OH_NN_PAD_CONSTANT_VALUE); + int32_t constantValue = 0; + constantValueTensor->SetBuffer(&constantValue, sizeof(constantValue)); + m_allTensors.emplace_back(constantValueTensor); + + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + constantValueTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: pad_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid constant's dimension. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_008, TestSize.Level0) +{ + m_paramDim = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + std::shared_ptr constantValueTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_PAD_CONSTANT_VALUE); + float constantValue[2] = {2.0, 2.0}; + constantValueTensor->SetBuffer(constantValue, 2 * sizeof(float)); + m_allTensors.emplace_back(constantValueTensor); + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + constantValueTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: pad_build_009 + * @tc.desc: Verify that the build function returns a failed message with passing invalid param. 
+ * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_ONE_HOT_AXIS); + + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pad_build_010 + * @tc.desc: Verify that the build function returns a failed message without set buffer for constantValue. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_build_010, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr constantValueTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_PAD_CONSTANT_VALUE); + m_allTensors.emplace_back(constantValueTensor); + + OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pad_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE); + + float constantValue = 2.0; + EXPECT_EQ(OH_NN_SUCCESS, m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_pad.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_PadFusion_GetConstantValue(primitive.get()); + EXPECT_EQ(returnValue, constantValue); +} + +/** + * @tc.name: pad_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(PadBuilderTest, pad_getprimitive_002, TestSize.Level0) +{ + PadBuilder pad; + LiteGraphPrimitvePtr primitive = m_pad.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/pow_builder_test.cpp b/test/unittest/ops/pow_builder_test.cpp new file mode 100644 index 0000000..099e2bf --- /dev/null +++ b/test/unittest/ops/pow_builder_test.cpp @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/pow_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class PowBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + PowBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void PowBuilderTest::SetUp() {} + +void PowBuilderTest::TearDown() {} + +/** + * @tc.name: pow_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: pow_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: pow_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pow_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pow_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pow_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pow_build_007 + * @tc.desc: Verify that the build 
function return a failed message with a virtual parameter + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_build_007, TestSize.Level0) +{ + std::vector paramsIndex = {3}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + std::shared_ptr powTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(powTensor); + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: pow_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: pow_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(PowBuilderTest, pow_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr powPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(powPrimitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/prelu_builder_test.cpp b/test/unittest/ops/prelu_builder_test.cpp new file mode 100644 index 0000000..030cd2b --- /dev/null +++ b/test/unittest/ops/prelu_builder_test.cpp @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/prelu_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class PReluBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + PReluBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_dim {1, 2, 2, 2}; +}; + +void PReluBuilderTest::SetUp() {} + +void PReluBuilderTest::TearDown() {} + +/** + * @tc.name: prelu_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: prelu_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: prelu_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: prelu_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: prelu_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: prelu_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: prelu_build_007 + * @tc.desc: Verify that the build function return a successful message with a virtual parameter + * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_build_007, TestSize.Level0) +{ + std::vector paramsIndex = {3}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + std::shared_ptr preluTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(preluTensor); + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: prelu_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: prelu_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(PReluBuilderTest, prelu_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr preluPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(preluPrimitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/quant_dtype_cast_builder_test.cpp b/test/unittest/ops/quant_dtype_cast_builder_test.cpp new file mode 100644 index 0000000..69a2d5d --- /dev/null +++ b/test/unittest/ops/quant_dtype_cast_builder_test.cpp @@ -0,0 +1,307 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/quant_dtype_cast_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class QuantDTypeCastBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveSrcTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveDstTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + QuantDTypeCastBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_params {2, 3}; + std::vector m_dim {3, 3}; + std::vector m_paramDim {}; +}; + +void QuantDTypeCastBuilderTest::SetUp() {} + +void QuantDTypeCastBuilderTest::TearDown() {} + +void QuantDTypeCastBuilderTest::SaveSrcTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr srcTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t *srcValue = new (std::nothrow) int64_t(1); + EXPECT_NE(nullptr, srcValue); + srcTensor->SetBuffer(srcValue, sizeof(int64_t)); + m_allTensors.emplace_back(srcTensor); +} + +void QuantDTypeCastBuilderTest::SaveDstTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr dstTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t *dstValue = new (std::nothrow) int64_t(1); + EXPECT_NE(nullptr, dstValue); + dstTensor->SetBuffer(dstValue, sizeof(int64_t)); + m_allTensors.emplace_back(dstTensor); +} + +/** + * @tc.name: quantdtypecast_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: quantdtypecast_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: quantdtypecast_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + 
m_params = {3, 4}; + + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: quantdtypecast_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_004, TestSize.Level0) +{ + m_outputs = {1, 2}; + m_params = {3, 4}; + + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: quantdtypecast_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: quantdtypecast_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: quantdtypecast_build_007 + * @tc.desc: Verify that the build function return a failed message with invalided src's dataType + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + + std::shared_ptr srcTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, + nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + int32_t srcValue = 1; + srcTensor->SetBuffer(&srcValue, sizeof(srcValue)); + m_allTensors.emplace_back(srcTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + srcTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: quantdtypecast_build_008 + * @tc.desc: Verify that the build function return a failed message with invalided dst's dataType + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_008, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + + std::shared_ptr dstTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, + nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + int32_t dstValue = 1; + dstTensor->SetBuffer(&dstValue, sizeof(dstValue)); + m_allTensors.emplace_back(dstTensor); + + OH_NN_ReturnCode ret = 
m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + dstTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: quantdtypecast_build_009 + * @tc.desc: Verify that the build function return a failed message with invalided parameter + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: quantdtypecast_build_010 + * @tc.desc: Verify that the build function return a failed message with empty src's buffer + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_010, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + + std::shared_ptr srcTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + m_allTensors.emplace_back(srcTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + srcTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: quantdtypecast_build_011 + * @tc.desc: Verify that the build function return a failed message with empty dst's buffer + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_build_011, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + + std::shared_ptr dstTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + m_allTensors.emplace_back(dstTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + dstTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: quantdtypecast_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: quantdtypecast_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastBuilderTest, quantdtypecast_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT8, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT8, m_dim, nullptr); + SaveSrcTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + SaveDstTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_DST_T); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + 
EXPECT_NE(primitive, expectPrimitive); + + int64_t srcValue = 1; + int64_t dstValue = 1; + auto srcReturn = mindspore::lite::MindIR_QuantDTypeCast_GetSrcT(primitive.get()); + EXPECT_EQ(srcReturn, srcValue); + auto dstReturn = mindspore::lite::MindIR_QuantDTypeCast_GetDstT(primitive.get()); + EXPECT_EQ(dstReturn, dstValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/reduce_all_builder_test.cpp b/test/unittest/ops/reduce_all_builder_test.cpp new file mode 100644 index 0000000..ff661f5 --- /dev/null +++ b/test/unittest/ops/reduce_all_builder_test.cpp @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/reduceall_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ReduceAllBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + ReduceAllBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3}; + std::vector m_inputDim {1, 1, 2, 2}; + std::vector m_outputDim {1, 1, 1, 2}; + std::vector m_paramDim {1}; +}; + +void ReduceAllBuilderTest::SetUp() {} + +void ReduceAllBuilderTest::TearDown() {} + +void ReduceAllBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr keepDimsTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool *keepDimsValue = new (std::nothrow) bool(true); + EXPECT_NE(nullptr, keepDimsValue); + keepDimsTensor->SetBuffer(keepDimsValue, sizeof(bool)); + m_allTensors.emplace_back(keepDimsTensor); +} + +/** + * @tc.name: reduceall_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: reduceall_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + 
SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: reduceall_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceall_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceall_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceall_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceall_build_007 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dataType + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + int64_t keepDimsValue = 1; + keepDimsTensor->SetBuffer(&keepDimsValue, sizeof(keepDimsValue)); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceall_build_008 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_008, TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, 
nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, + nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + bool keepDimsValue[2] = {true, true}; + keepDimsTensor->SetBuffer(keepDimsValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceall_build_009 + * @tc.desc: Verify that the build function return a failed message with invalided parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceall_build_010 + * @tc.desc: Verify that the build function return a failed message with empty keepdims's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_build_010, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceall_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: reduceall_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(ReduceAllBuilderTest, reduceall_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_ALL_KEEP_DIMS); + + bool keepDimsValue = true; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr reduceallPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(reduceallPrimitive, expectPrimitive); + auto returnValue = mindspore::lite::MindIR_ReduceFusion_GetKeepDims(reduceallPrimitive.get()); + EXPECT_EQ(returnValue, keepDimsValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/reduce_mean_builder_test.cpp b/test/unittest/ops/reduce_mean_builder_test.cpp new file mode 100644 index 0000000..3f3f657 --- /dev/null +++ b/test/unittest/ops/reduce_mean_builder_test.cpp @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/reducemean_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ReduceMeanBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + ReduceMeanBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3}; + std::vector m_inputDim {3, 5, 6, 4}; + std::vector m_outputDim {3, 5, 6, 1}; + std::vector m_paramDim {1}; +}; + +void ReduceMeanBuilderTest::SetUp() {} + +void ReduceMeanBuilderTest::TearDown() {} + +void ReduceMeanBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr keepDimsTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool *keepDimsValue = new (std::nothrow) bool(true); + EXPECT_NE(nullptr, keepDimsValue); + keepDimsTensor->SetBuffer(keepDimsValue, sizeof(bool)); + m_allTensors.emplace_back(keepDimsTensor); +} + +/** + * @tc.name: reducemean_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: reducemean_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: reducemean_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, 
OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemean_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemean_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemean_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemean_build_007 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dataType + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + int64_t keepDimsValue = 1; + keepDimsTensor->SetBuffer(&keepDimsValue, sizeof(keepDimsValue)); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemean_build_008 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_008, TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + bool keepDimsValue[2] = {true, true}; + keepDimsTensor->SetBuffer(keepDimsValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemean_build_009 + * @tc.desc: Verify that the build function return a failed message with invalided parameter + * @tc.type: FUNC + */ 
+HWTEST_F(ReduceMeanBuilderTest, reducemean_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reducemean_build_010 + * @tc.desc: Verify that the build function return a failed message with empty keepdims's buffer + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_build_010, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reducemean_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: reducemean_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(ReduceMeanBuilderTest, reducemean_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_MEAN_KEEP_DIMS); + + bool keepDimsValue = true; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr reducemeanPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(reducemeanPrimitive, expectPrimitive); + auto returnValue = mindspore::lite::MindIR_ReduceFusion_GetKeepDims(reducemeanPrimitive.get()); + EXPECT_EQ(returnValue, keepDimsValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/reduce_prod_builder_test.cpp b/test/unittest/ops/reduce_prod_builder_test.cpp new file mode 100644 index 0000000..5b160a8 --- /dev/null +++ b/test/unittest/ops/reduce_prod_builder_test.cpp @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/reduceprod_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ReduceProdBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + ReduceProdBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_params {3}; + std::vector m_inputDim {3, 5, 6, 4}; + std::vector m_outputDim {3, 5, 6, 1}; + std::vector m_paramDim {1}; +}; + +void ReduceProdBuilderTest::SetUp() {} + +void ReduceProdBuilderTest::TearDown() {} + +void ReduceProdBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr keepDimsTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool *keepDimsValue = new (std::nothrow) bool(true); + EXPECT_NE(nullptr, keepDimsValue); + keepDimsTensor->SetBuffer(keepDimsValue, sizeof(bool)); + m_allTensors.emplace_back(keepDimsTensor); +} + +/** + * @tc.name: reduceprod_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: reduceprod_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: reduceprod_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + m_params = {4}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceprod_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + m_params = {4}; + + 
SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceprod_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceprod_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceprod_build_007 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dataType + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + int64_t keepDimsValue = 1; + keepDimsTensor->SetBuffer(&keepDimsValue, sizeof(keepDimsValue)); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceprod_build_008 + * @tc.desc: Verify that the build function return a failed message with invalided keepdims's dimension + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_008, TestSize.Level0) +{ + m_paramDim = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + bool keepDimsValue[2] = {true, true}; + keepDimsTensor->SetBuffer(keepDimsValue, 2 * sizeof(bool)); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceprod_build_009 + * @tc.desc: Verify that the build function return a failed message with invalided parameter + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reduceprod_build_010 + * @tc.desc: Verify that the build function return a failed message with empty keepdims's buffer + * 
@tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_build_010, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + + std::shared_ptr keepDimsTensor = TransToNNTensor(OH_NN_BOOL, + m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + m_allTensors.emplace_back(keepDimsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + keepDimsTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: reduceprod_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: reduceprod_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(ReduceProdBuilderTest, reduceprod_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_BOOL, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_BOOL, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_REDUCE_PROD_KEEP_DIMS); + + bool keepDimsValue = true; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr reduceprodPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(reduceprodPrimitive, expectPrimitive); + auto returnValue = mindspore::lite::MindIR_ReduceFusion_GetKeepDims(reduceprodPrimitive.get()); + EXPECT_EQ(returnValue, keepDimsValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/relu6_builder_test.cpp b/test/unittest/ops/relu6_builder_test.cpp new file mode 100644 index 0000000..4ce5c1e --- /dev/null +++ b/test/unittest/ops/relu6_builder_test.cpp @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/relu6_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class Relu6BuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + Relu6Builder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 5, 1, 1}; +}; + +void Relu6BuilderTest::SetUp() {} + +void Relu6BuilderTest::TearDown() {} + +/** + * @tc.name: relu6_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(Relu6BuilderTest, relu6_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: relu6_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(Relu6BuilderTest, relu6_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: relu6_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(Relu6BuilderTest, relu6_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu6_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(Relu6BuilderTest, relu6_build_004, TestSize.Level0) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu6_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(Relu6BuilderTest, relu6_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu6_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(Relu6BuilderTest, relu6_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu6_build_007 + * @tc.desc: Verify that 
the build function returns a failed message with a virtual parameter
+ * @tc.type: FUNC
+ */
+HWTEST_F(Relu6BuilderTest, relu6_build_007, TestSize.Level0)
+{
+    std::vector<uint32_t> paramsIndex = {2};
+    std::vector<int32_t> paramDim = {};
+
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr);
+    std::shared_ptr<NNTensor> relu6Tensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR);
+    m_allTensors.emplace_back(relu6Tensor);
+
+    OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: relu6_get_primitive_001
+ * @tc.desc: Verify the GetPrimitive function returns nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(Relu6BuilderTest, relu6_get_primitive_001, TestSize.Level0)
+{
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
+    EXPECT_EQ(primitive, expectPrimitive);
+}
+
+/**
+ * @tc.name: relu6_get_primitive_002
+ * @tc.desc: Verify the normal params return behavior of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(Relu6BuilderTest, relu6_get_primitive_002, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    LiteGraphTensorPtr relu6Primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
+    EXPECT_NE(relu6Primitive, expectPrimitive);
+
+    mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_RELU6;
+    auto returnValue = mindspore::lite::MindIR_Activation_GetActivationType(relu6Primitive.get());
+    EXPECT_EQ(returnValue, activationType);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/test/unittest/ops/relu_builder_test.cpp b/test/unittest/ops/relu_builder_test.cpp
new file mode 100644
index 0000000..5fabb49
--- /dev/null
+++ b/test/unittest/ops/relu_builder_test.cpp
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "frameworks/native/ops/relu_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ReluBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + ReluBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 5, 1, 1}; +}; + +void ReluBuilderTest::SetUp() {} + +void ReluBuilderTest::TearDown() {} + +/** + * @tc.name: relu_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReluBuilderTest, relu_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: relu_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReluBuilderTest, relu_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: relu_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReluBuilderTest, relu_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReluBuilderTest, relu_build_004, TestSize.Level0) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ReluBuilderTest, relu_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(ReluBuilderTest, relu_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: relu_build_007 + * @tc.desc: Verify that the build function 
returns a failed message with a virtual parameter
+ * @tc.type: FUNC
+ */
+HWTEST_F(ReluBuilderTest, relu_build_007, TestSize.Level0)
+{
+    std::vector<uint32_t> paramsIndex = {2};
+    std::vector<int32_t> paramDim = {};
+
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr);
+    std::shared_ptr<NNTensor> reluTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR);
+    m_allTensors.emplace_back(reluTensor);
+
+    OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: relu_get_primitive_001
+ * @tc.desc: Verify the GetPrimitive function returns nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(ReluBuilderTest, relu_get_primitive_001, TestSize.Level0)
+{
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
+    EXPECT_EQ(primitive, expectPrimitive);
+}
+
+/**
+ * @tc.name: relu_get_primitive_002
+ * @tc.desc: Verify the normal params return behavior of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ReluBuilderTest, relu_get_primitive_002, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    LiteGraphTensorPtr reluPrimitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
+    EXPECT_NE(reluPrimitive, expectPrimitive);
+
+    mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_RELU;
+    auto returnValue = mindspore::lite::MindIR_Activation_GetActivationType(reluPrimitive.get());
+    EXPECT_EQ(returnValue, activationType);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/test/unittest/ops/reshape_builder_test.cpp b/test/unittest/ops/reshape_builder_test.cpp
new file mode 100644
index 0000000..6443b49
--- /dev/null
+++ b/test/unittest/ops/reshape_builder_test.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "frameworks/native/ops/reshape_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ReshapeBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + ReshapeBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_inputDim {1, 2, 4, 1}; + std::vector m_outputDim {1, 4, 2, 4}; +}; + +void ReshapeBuilderTest::SetUp() {} + +void ReshapeBuilderTest::TearDown() {} + +/** + * @tc.name: reshape_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: reshape_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: reshape_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_build_003, TestSize.Level0) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reshape_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_build_004, TestSize.Level0) +{ + m_outputs = {2, 3}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reshape_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reshape_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = 
m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reshape_build_007 + * @tc.desc: Verify that the build function return a failed message with a virtual parameter + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_build_007, TestSize.Level0) +{ + std::vector paramsIndex = {3}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: reshape_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: reshape_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(ReshapeBuilderTest, reshape_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr reshapePrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(reshapePrimitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/resize_bilinear_builder_test.cpp b/test/unittest/ops/resize_bilinear_builder_test.cpp new file mode 100644 index 0000000..0ada3d9 --- /dev/null +++ b/test/unittest/ops/resize_bilinear_builder_test.cpp @@ -0,0 +1,664 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/resize_bilinear_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ResizeBilinearBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveHeightTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveWidthTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveRatioTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveModeTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveOutsideTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SetParameterTensor(); + +protected: + ResizeBilinearBuilder m_builder; + + std::shared_ptr heightTensor {nullptr}; + std::shared_ptr widthTensor {nullptr}; + std::shared_ptr ratioTensor {nullptr}; + std::shared_ptr modeTensor {nullptr}; + std::shared_ptr outsideTensor {nullptr}; + + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_params {2, 3, 4, 5, 6}; + std::vector m_dim {1, 2, 2, 2}; + std::vector m_paramDim {}; +}; + +void ResizeBilinearBuilderTest::SetUp() {} + +void ResizeBilinearBuilderTest::TearDown() {} + +void ResizeBilinearBuilderTest::SaveHeightTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + heightTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t *heightValue = new (std::nothrow) int64_t(1); + EXPECT_NE(nullptr, heightValue); + heightTensor->SetBuffer(heightValue, sizeof(int64_t)); + m_allTensors.emplace_back(heightTensor); +} + +void ResizeBilinearBuilderTest::SaveWidthTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + widthTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t *widthValue = new (std::nothrow) int64_t(1); + EXPECT_NE(nullptr, widthValue); + widthTensor->SetBuffer(widthValue, sizeof(int64_t)); + m_allTensors.emplace_back(widthTensor); +} + +void ResizeBilinearBuilderTest::SaveRatioTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + ratioTensor = TransToNNTensor(dataType, dim, quantParam, type); + bool *ratioValue = new (std::nothrow) bool(true); + EXPECT_NE(nullptr, ratioValue); + ratioTensor->SetBuffer(ratioValue, sizeof(bool)); + m_allTensors.emplace_back(ratioTensor); +} + +void ResizeBilinearBuilderTest::SaveModeTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + modeTensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t *modeValue = new (std::nothrow) int8_t(1); + EXPECT_NE(nullptr, modeValue); + modeTensor->SetBuffer(modeValue, sizeof(int8_t)); + m_allTensors.emplace_back(modeTensor); +} + +void ResizeBilinearBuilderTest::SaveOutsideTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + outsideTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t *outsideValue = 
new (std::nothrow) int64_t(1); + EXPECT_NE(nullptr, outsideValue); + outsideTensor->SetBuffer(outsideValue, sizeof(int64_t)); + m_allTensors.emplace_back(outsideTensor); +} + +void ResizeBilinearBuilderTest::SetParameterTensor() +{ + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); +} + +/** + * @tc.name: resizebilinear_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SetParameterTensor(); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: resizebilinear_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SetParameterTensor(); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: resizebilinear_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SetParameterTensor(); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: resizebilinear_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_004, TestSize.Level0) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SetParameterTensor(); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: resizebilinear_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: resizebilinear_build_006 + * @tc.desc: Verify that the build function return a failed message without output 
tensor + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: resizebilinear_build_007 + * @tc.desc: Verify that the build function return a failed message with invalided height's dataType + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + heightTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + int32_t heightValues = 1; + heightTensor->SetBuffer(&heightValues, sizeof(heightValues)); + m_allTensors.emplace_back(heightTensor); + + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + heightTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_008 + * @tc.desc: Verify that the build function return a failed message with invalided width's dataType + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_008, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + + widthTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + int32_t widthValues = 1; + widthTensor->SetBuffer(&widthValues, sizeof(widthValues)); + m_allTensors.emplace_back(widthTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + widthTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_009 + * @tc.desc: Verify that the build function return a failed message with invalided ratio's dataType + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + ratioTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + int64_t ratioValues = 
1; + ratioTensor->SetBuffer(&ratioValues, sizeof(ratioValues)); + m_allTensors.emplace_back(ratioTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + ratioTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_010 + * @tc.desc: Verify that the build function return a failed message with invalided mode's dataType + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_010, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + modeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + int64_t modeValues = 1; + modeTensor->SetBuffer(&modeValues, sizeof(modeValues)); + m_allTensors.emplace_back(modeTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + modeTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_011 + * @tc.desc: Verify that the build function return a failed message with invalided outside's dataType + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_011, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + + outsideTensor = TransToNNTensor(OH_NN_INT32, + m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + int32_t outsideValues = 1; + outsideTensor->SetBuffer(&outsideValues, sizeof(outsideValues)); + m_allTensors.emplace_back(outsideTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + outsideTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_012 + * @tc.desc: Verify that the build function return a failed message with invalided height's dimension + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_012, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + std::vector heightDim = {2}; + heightTensor = TransToNNTensor(OH_NN_INT64, heightDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + int64_t 
heightValues[2] = {1, 1}; + heightTensor->SetBuffer(heightValues, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(heightTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + heightTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_013 + * @tc.desc: Verify that the build function return a failed message with invalided width's dimension + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_013, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + std::vector widthDim = {2}; + widthTensor = TransToNNTensor(OH_NN_INT64, widthDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + int64_t widthValues[2] = {1, 1}; + widthTensor->SetBuffer(widthValues, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(widthTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + widthTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_014 + * @tc.desc: Verify that the build function return a failed message with invalided ratio's dimension + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_014, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + std::vector ratioDim = {2}; + ratioTensor = TransToNNTensor(OH_NN_BOOL, + ratioDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + bool ratioValues[2] = {true, true}; + ratioTensor->SetBuffer(ratioValues, 2 * sizeof(bool)); + m_allTensors.emplace_back(ratioTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + ratioTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_015 + * @tc.desc: Verify that the build function return a failed message with invalided mode's dimension + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_015, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + + std::vector modeDim = {2}; + modeTensor = TransToNNTensor(OH_NN_INT8, + 
modeDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + int8_t modeValues[2] = {1, 1}; + modeTensor->SetBuffer(modeValues, 2 * sizeof(int8_t)); + m_allTensors.emplace_back(modeTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + modeTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_016 + * @tc.desc: Verify that the build function return a failed message with invalided outside's dimension + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_016, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + + std::vector outsideDim = {2}; + outsideTensor = TransToNNTensor(OH_NN_INT64, + outsideDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + int64_t outsideValues[2] = {1, 1}; + outsideTensor->SetBuffer(outsideValues, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(outsideTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + outsideTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_017 + * @tc.desc: Verify that the build function return a failed message with invalided parameter + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_017, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: resizebilinear_build_018 + * @tc.desc: Verify that the build function return a failed message with empty height's buffer + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_018, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + heightTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + m_allTensors.emplace_back(heightTensor); + + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, 
m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + heightTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_019 + * @tc.desc: Verify that the build function return a failed message with empty width's buffer + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_019, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + widthTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + m_allTensors.emplace_back(widthTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + widthTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_020 + * @tc.desc: Verify that the build function return a failed message with empty ratio's buffer + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_020, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + ratioTensor = TransToNNTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + m_allTensors.emplace_back(ratioTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + ratioTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_021 + * @tc.desc: Verify that the build function return a failed message with empty mode's buffer + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_build_021, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveOutsideTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + + modeTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + m_allTensors.emplace_back(modeTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + modeTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_build_022 + * @tc.desc: Verify that the build function return a failed message with empty outside's buffer + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, 
resizebilinear_build_022, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveHeightTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_HEIGHT); + SaveWidthTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_NEW_WIDTH); + SaveRatioTensor(OH_NN_BOOL, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO); + SaveModeTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE); + + outsideTensor = TransToNNTensor(OH_NN_INT64, + m_paramDim, nullptr, OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE); + m_allTensors.emplace_back(outsideTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + outsideTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: resizebilinear_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: resizebilinear_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(ResizeBilinearBuilderTest, resizebilinear_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SetParameterTensor(); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(primitive, expectPrimitive); + + int64_t heightValue = 1; + int64_t widthValue = 1; + bool ratioValue = true; + int8_t modeValue = 1; + int64_t outsideValue = 1; + + int64_t heightReturn = mindspore::lite::MindIR_Resize_GetNewHeight(primitive.get()); + EXPECT_EQ(heightReturn, heightValue); + int64_t widthReturn = mindspore::lite::MindIR_Resize_GetNewWidth(primitive.get()); + EXPECT_EQ(widthReturn, widthValue); + bool ratioReturn = mindspore::lite::MindIR_Resize_GetPreserveAspectRatio(primitive.get()); + EXPECT_EQ(ratioReturn, ratioValue); + int8_t modeReturn = mindspore::lite::MindIR_Resize_GetCoordinateTransformMode(primitive.get()); + EXPECT_EQ(modeReturn, modeValue); + int64_t outsideReturn = mindspore::lite::MindIR_Resize_GetExcludeOutside(primitive.get()); + EXPECT_EQ(outsideReturn, outsideValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/rsqrt_builder_test.cpp b/test/unittest/ops/rsqrt_builder_test.cpp new file mode 100644 index 0000000..62c98b3 --- /dev/null +++ b/test/unittest/ops/rsqrt_builder_test.cpp @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
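A pattern that recurs throughout the resize-bilinear tests above is worth noting: parameter values that live on the test's stack (heightValues, widthValues, ratioValues, and so on) are detached again with SetBuffer(nullptr, 0) before the tensor goes out of scope, whereas values allocated with new (std::nothrow) inside the Save*Tensor helpers are handed over without a reset. A minimal sketch of the apparent ownership rule follows; the assumption that NNTensor frees whatever buffer it still holds on destruction is inferred from these tests, not stated by them.

    // Stack-backed parameter: the test keeps ownership, so it must detach the buffer.
    int64_t heightValues[2] = {1, 1};
    heightTensor->SetBuffer(heightValues, 2 * sizeof(int64_t));
    // ... run m_builder.Build(...) and the EXPECT_* checks ...
    heightTensor->SetBuffer(nullptr, 0);   // detach so the tensor never tries to free stack memory

    // Heap-backed parameter: ownership is handed to the tensor, so no reset is needed.
    int64_t *heightValue = new (std::nothrow) int64_t(1);
    heightTensor->SetBuffer(heightValue, sizeof(int64_t));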
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/rsqrt_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class RsqrtBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + RsqrtBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 8, 1, 1}; +}; + +void RsqrtBuilderTest::SetUp() {} + +void RsqrtBuilderTest::TearDown() {} + +/** + * @tc.name: rsqrt_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: rsqrt_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputs, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: rsqrt_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: rsqrt_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_build_004, TestSize.Level0) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: rsqrt_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: rsqrt_build_006 + * @tc.desc: 
Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: rsqrt_build_007 + * @tc.desc: Verify that the build function return a failed message with a virtual parameter + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_build_007, TestSize.Level0) +{ + std::vector paramsIndex = {2}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + std::shared_ptr paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputs, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: rsqrt_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: rsqrt_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(RsqrtBuilderTest, rsqrt_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputs, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr rsqrtPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(rsqrtPrimitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/scale_builder_test.cpp b/test/unittest/ops/scale_builder_test.cpp new file mode 100644 index 0000000..5ab6946 --- /dev/null +++ b/test/unittest/ops/scale_builder_test.cpp @@ -0,0 +1,377 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "frameworks/native/ops/scale_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class ScaleBuilderTest : public OpsTest {
+public:
+    void SetUp() override;
+    void TearDown() override;
+
+protected:
+    void SaveAxisTensor(OH_NN_DataType dataType, const std::vector<int32_t> &dim,
+        const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+    void SaveActivationTensor(OH_NN_DataType dataType, const std::vector<int32_t> &dim,
+        const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+
+protected:
+    ScaleBuilder m_builder;
+    std::vector<uint32_t> m_inputs {0, 1, 2};
+    std::vector<uint32_t> m_outputs {3};
+    std::vector<uint32_t> m_params {4, 5};
+    std::vector<int32_t> m_dim {1, 4, 1, 1};
+    std::vector<int32_t> m_paramDim {};
+};
+
+void ScaleBuilderTest::SetUp() {}
+
+void ScaleBuilderTest::TearDown() {}
+
+void ScaleBuilderTest::SaveAxisTensor(OH_NN_DataType dataType, const std::vector<int32_t> &dim,
+    const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    std::shared_ptr<NNTensor> axisTensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int64_t *axisValue = new (std::nothrow) int64_t(1);
+    EXPECT_NE(nullptr, axisValue);
+    axisTensor->SetBuffer(axisValue, sizeof(int64_t));
+    m_allTensors.emplace_back(axisTensor);
+}
+
+void ScaleBuilderTest::SaveActivationTensor(OH_NN_DataType dataType, const std::vector<int32_t> &dim,
+    const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    std::shared_ptr<NNTensor> activationTensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int8_t *activationValue = new (std::nothrow) int8_t(0);
+    EXPECT_NE(nullptr, activationValue);
+    activationTensor->SetBuffer(activationValue, sizeof(int8_t));
+    m_allTensors.emplace_back(activationTensor);
+}
+
+/**
+ * @tc.name: scale_build_001
+ * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ScaleBuilderTest, scale_build_001, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr);
+    SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS);
+    SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/**
+ * @tc.name: scale_build_002
+ * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ScaleBuilderTest, scale_build_002, TestSize.Level0)
+{
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr);
+    SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS);
+    SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors));
+    OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: scale_build_003
+ * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(ScaleBuilderTest, scale_build_003, TestSize.Level0)
+{
+    m_inputs = {0, 1, 2, 3};
+    m_outputs = {4};
+    m_params = {5, 6};
+
+    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim,
nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: scale_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_004, TestSize.Level0) +{ + m_outputs = {3, 4}; + m_params = {5, 6}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: scale_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: scale_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: scale_build_007 + * @tc.desc: Verify that the build function return a failed message with invalided axis's dataType + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + int32_t axisValue = 1; + axisTensor->SetBuffer(&axisValue, sizeof(axisValue)); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + axisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: scale_build_008 + * @tc.desc: Verify that the build function return a failed message with invalided activation's dataType + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_008, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + + std::shared_ptr activationTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + int64_t activationValue = 0; + activationTensor->SetBuffer(&activationValue, sizeof(activationValue)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 
0); +} + +/** + * @tc.name: scale_build_009 + * @tc.desc: Verify that the build function return a failed message with invalided axis's dimension + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + std::vector axistDim = {2}; + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, axistDim, nullptr, OH_NN_SCALE_AXIS); + int64_t axisValue[2] = {1, 1}; + axisTensor->SetBuffer(axisValue, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); + + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + axisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: scale_build_010 + * @tc.desc: Verify that the build function return a failed message with invalided activation's dimension + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_010, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + + std::vector activationDim = {2}; + std::shared_ptr activationTensor = TransToNNTensor(OH_NN_INT8, + activationDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + int64_t activationValue[2] = {1, 1}; + activationTensor->SetBuffer(activationValue, 2 * sizeof(int64_t)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: scale_build_011 + * @tc.desc: Verify that the build function return a failed message with invalided activation's buffer + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_011, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + + std::shared_ptr activationTensor = TransToNNTensor(OH_NN_INT8, + m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + int8_t activationValue = -1; + activationTensor->SetBuffer(&activationValue, sizeof(activationValue)); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: scale_build_012 + * @tc.desc: Verify that the build function return a failed message with invalided parameter + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_012, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_QUANT_DTYPE_CAST_SRC_T); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: scale_build_013 + * @tc.desc: Verify that the build function return a failed message with empty axis's buffer + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_013, 
TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + axisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: scale_build_014 + * @tc.desc: Verify that the build function return a failed message with empty activation's buffer + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_build_014, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + + std::shared_ptr activationTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, + nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + m_allTensors.emplace_back(activationTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + activationTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: scale_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: scale_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(ScaleBuilderTest, scale_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveAxisTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_SCALE_AXIS); + SaveActivationTensor(OH_NN_INT8, m_paramDim, nullptr, OH_NN_SCALE_ACTIVATIONTYPE); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(primitive, expectPrimitive); + + int64_t axisValue = 1; + int8_t activationValue = 0; + auto axisReturn = mindspore::lite::MindIR_ScaleFusion_GetAxis(primitive.get()); + EXPECT_EQ(axisReturn, axisValue); + auto activationReturn = mindspore::lite::MindIR_ScaleFusion_GetActivationType(primitive.get()); + EXPECT_EQ(activationReturn, activationValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/shape_builder_test.cpp b/test/unittest/ops/shape_builder_test.cpp new file mode 100644 index 0000000..b52f0db --- /dev/null +++ b/test/unittest/ops/shape_builder_test.cpp @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
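The scale tests above exercise OH_NN_SCALE_AXIS and OH_NN_SCALE_ACTIVATIONTYPE directly against the builder. For orientation, here is a minimal sketch of how the same two scalar parameters would be supplied through the runtime's public C API; the function and struct names (OH_NNModel_Construct, OH_NNModel_AddTensor, OH_NNModel_SetTensorData, OH_NNModel_AddOperation, OH_NN_Tensor, OH_NN_UInt32Array) are assumed from the C interface added elsewhere in this patch and should be treated as assumptions here, not as something these tests verify.

    int64_t axis = 1;
    int8_t activation = 0;

    OH_NNModel *model = OH_NNModel_Construct();
    // ... indices 0-2: input, scale and bias tensors; index 3: output tensor ...

    // Scalar parameters use an empty dimension list, matching m_paramDim {} in the tests.
    OH_NN_Tensor axisTensor = {OH_NN_INT64, 0, nullptr, nullptr, OH_NN_SCALE_AXIS};
    OH_NNModel_AddTensor(model, &axisTensor);                        // becomes index 4
    OH_NNModel_SetTensorData(model, 4, &axis, sizeof(axis));

    OH_NN_Tensor activationTensor = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_SCALE_ACTIVATIONTYPE};
    OH_NNModel_AddTensor(model, &activationTensor);                  // becomes index 5
    OH_NNModel_SetTensorData(model, 5, &activation, sizeof(activation));

    uint32_t inputIdx[] = {0, 1, 2};
    uint32_t outputIdx[] = {3};
    uint32_t paramIdx[] = {4, 5};
    OH_NN_UInt32Array inputs = {inputIdx, 3};
    OH_NN_UInt32Array outputs = {outputIdx, 1};
    OH_NN_UInt32Array params = {paramIdx, 2};
    OH_NNModel_AddOperation(model, OH_NN_OPS_SCALE, &params, &inputs, &outputs);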
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/shape_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ShapeBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + ShapeBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_inputDim {1, 2, 3, 1}; + std::vector m_outputDim {4}; +}; + +void ShapeBuilderTest::SetUp() {} + +void ShapeBuilderTest::TearDown() {} + +/** + * @tc.name: shape_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: shape_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: shape_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: shape_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_build_004, TestSize.Level0) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: shape_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, 
m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: shape_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: shape_build_007 + * @tc.desc: Verify that the build function return a failed message with a virtual parameter + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_build_007, TestSize.Level0) +{ + std::vector paramsIndex = {2}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + std::shared_ptr paramTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(paramTensor); + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: shape_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: shape_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(ShapeBuilderTest, shape_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr shapePrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(shapePrimitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/sigmoid_builder_test.cpp b/test/unittest/ops/sigmoid_builder_test.cpp new file mode 100644 index 0000000..0eb4dcc --- /dev/null +++ b/test/unittest/ops/sigmoid_builder_test.cpp @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/sigmoid_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SigmoidBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + SigmoidBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 5, 1, 1}; +}; + +void SigmoidBuilderTest::SetUp() {} + +void SigmoidBuilderTest::TearDown() {} + +/** + * @tc.name: sigmoid_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: sigmoid_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); +} + +/** + * @tc.name: sigmoid_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sigmoid_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_build_004, TestSize.Level0) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sigmoid_build_005 + * @tc.desc: Verify that the build function return a failed message with null allTensor + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sigmoid_build_006 + * @tc.desc: Verify that the build function return a failed message without output tensor + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * 
@tc.name: sigmoid_build_007 + * @tc.desc: Verify that the build function return a failed message with a virtual parameter + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_build_007, TestSize.Level0) +{ + std::vector paramsIndex = {2}; + std::vector paramDim = {}; + + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + std::shared_ptr sigmoidTensor = TransToNNTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TENSOR); + m_allTensors.emplace_back(sigmoidTensor); + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sigmoid_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: sigmoid_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(SigmoidBuilderTest, sigmoid_get_primitive_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr sigmoidPrimitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive}; + EXPECT_NE(sigmoidPrimitive, expectPrimitive); + + mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_SIGMOID; + auto returnValue = mindspore::lite::MindIR_Activation_GetActivationType(sigmoidPrimitive.get()); + EXPECT_EQ(returnValue, activationType); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/slice_builder_test.cpp b/test/unittest/ops/slice_builder_test.cpp new file mode 100644 index 0000000..b1a2fcf --- /dev/null +++ b/test/unittest/ops/slice_builder_test.cpp @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/slice_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SliceBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void SaveAxesTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + SliceBuilder m_builder; +}; + +void SliceBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector paramsIndex = {}; + std::vector inputDim = {3, 2, 3}; + std::vector OutputDim = {1, 1, 3}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +void SliceBuilderTest::SaveAxesTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr axesTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axesValue = new (std::nothrow) int64_t[1]{0}; + EXPECT_NE(nullptr, axesValue); + axesTensor->SetBuffer(axesValue, sizeof(int64_t)); + m_allTensors.emplace_back(axesTensor); +} + +/** + * @tc.name: slice_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: slice_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3 }; + + InitTensor(inputsIndex, outputsIndex); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: slice_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: slice_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3, 4 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + 
* @tc.name: slice_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_build_005, TestSize.Level0) +{ + std::vector inputsIndex = {}; + std::vector outputsIndex = {}; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: slice_build_006 + * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = {}; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: slice_build_007 + * @tc.desc: Provide a param to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = {3}; + std::vector paramsIndex = { 4 }; + std::vector inputDim = {3, 2, 3}; + std::vector OutputDim = {1, 1, 3}; + std::vector paramDim = {}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + SaveAxesTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: slice_getprimitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_getprimitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: slice_getprimitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(SliceBuilderTest, slice_getprimitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3 }; + + InitTensor(inputsIndex, outputsIndex); + + std::vector expectAxesValue = {0}; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(primitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/softmax_builder_test.cpp b/test/unittest/ops/softmax_builder_test.cpp new file mode 100644 index 0000000..58ce833 --- /dev/null +++ b/test/unittest/ops/softmax_builder_test.cpp @@ -0,0 +1,303 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/softmax_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SoftmaxBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + SoftmaxBuilder m_builder; + std::vector m_expectAxisValue; +}; + +void SoftmaxBuilderTest::SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr axisTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t[1]{1}; + EXPECT_NE(nullptr, axisValue); + axisTensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); + m_expectAxisValue.emplace_back(*axisValue); +} + +void SoftmaxBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector paramsIndex = { 2 }; + std::vector inputDim = {1, 5, 1, 1}; + std::vector OutputDim = {1, 5, 1, 1}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +/** + * @tc.name: softmax_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SOFTMAX_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: softmax_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SOFTMAX_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: softmax_build_003 + * @tc.desc: Provide two more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector 
outputsIndex = { 3 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SOFTMAX_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: softmax_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SOFTMAX_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: softmax_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_build_005, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramsIndex = { 3 }; + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: softmax_build_006 + * @tc.desc: Provide empty output to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = {}; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SOFTMAX_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: softmax_build_007 + * @tc.desc: Provide no param error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramsIndex = {}; + std::vector inputDim = {1, 5, 1, 1}; + std::vector OutputDim = {1, 5, 1, 1}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: softmax_build_008 + * @tc.desc: Provide param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_build_008, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_SOFTMAX_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: softmax_build_009 + * @tc.desc: Provide param dimension error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_build_009, TestSize.Level0) +{ + std::vector 
inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {1, 5, 1, 1}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SOFTMAX_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: softmax_build_010 + * @tc.desc: Provide parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_build_010, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SOFTMAX_AXIS); + axisTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: softmax_build_011 + * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_build_011, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SCALE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: softmax_getprimitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_getprimitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: softmax_getprimitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(SoftmaxBuilderTest, softmax_getprimitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SOFTMAX_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr nullPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(nullPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_Softmax_GetAxis(primitive.get()); + auto returnValueSize = returnValue.size(); + for (size_t i = 0; i < returnValueSize; ++i) { + EXPECT_EQ(returnValue[i], m_expectAxisValue[i]); + } +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/spacetobatchnd_builder_test.cpp b/test/unittest/ops/spacetobatchnd_builder_test.cpp new file mode 100644 index 0000000..05c6317 --- /dev/null +++ b/test/unittest/ops/spacetobatchnd_builder_test.cpp @@ -0,0 +1,487 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
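One detail of softmax_getprimitive_002 above: the axis is written as a single INT64 value, but MindIR_Softmax_GetAxis hands it back as a vector, so the test compares element by element. A slightly defensive variant of that check (a sketch; the added size assertion is not in the original test) would fail loudly if the returned vector were unexpectedly empty instead of silently skipping the loop:

    auto returnValue = mindspore::lite::MindIR_Softmax_GetAxis(primitive.get());
    ASSERT_EQ(returnValue.size(), m_expectAxisValue.size());   // guards against an empty result
    for (size_t i = 0; i < returnValue.size(); ++i) {
        EXPECT_EQ(returnValue[i], m_expectAxisValue[i]);
    }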
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/space_to_batch_nd_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SpaceToBatchNDBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void SaveBlockShapeTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SavePaddingsTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void InitTensor(const std::vector& inputsIndex, const std::vector& outputsIndex, + const std::vector& paramsIndex); + +protected: + SpaceToBatchNDBuilder m_builder; + std::vector m_expectBlockShapeValue; + std::vector> m_expectPaddingsValue; +}; + +void SpaceToBatchNDBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector paramsIndex = { 2, 3 }; + std::vector inputDim = {3, 2, 3}; + std::vector OutputDim = {1, 1, 3}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +void SpaceToBatchNDBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex, const std::vector& paramsIndex) +{ + std::vector inputDim = {1, 2, 2, 1}; + std::vector OutputDim = {4, 1, 1, 1}; + std::vector shapeDim = {3}; + std::vector paddingsDim = {2, 2}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); +} + +void SpaceToBatchNDBuilderTest::SaveBlockShapeTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + const int blockShapeLen = 2; + std::shared_ptr blockShapeTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* blockShapeValue = new (std::nothrow) int64_t[blockShapeLen] {2, 2}; + EXPECT_NE(nullptr, blockShapeValue); + blockShapeTensor->SetBuffer(blockShapeValue, sizeof(int64_t) * blockShapeLen); + m_allTensors.emplace_back(blockShapeTensor); + m_expectBlockShapeValue.assign(blockShapeValue, blockShapeValue + blockShapeLen); +} + +void SpaceToBatchNDBuilderTest::SavePaddingsTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + const int paddingsLen = 4; + const int row = 2; + const int col = 2; + std::shared_ptr paddingsTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* paddingsValue = new (std::nothrow) int64_t[paddingsLen] {0, 0, 0, 0}; + EXPECT_NE(nullptr, paddingsValue); + 
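+    // The four zero paddings written below are attached to the tensor and then copied row by row
+    // into the 2 x 2 m_expectPaddingsValue matrix, which the GetPrimitive test compares against later.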
paddingsTensor->SetBuffer(paddingsValue, sizeof(int64_t) * paddingsLen); + m_allTensors.emplace_back(paddingsTensor); + + m_expectPaddingsValue.resize(row); + for (int i = 0; i < row; ++i) { + m_expectPaddingsValue[i].resize(col); + } + + int i = 0; + int j = 0; + for (int k = 0; k < paddingsLen; ++k) { + i = k / col; + j = k % col; + m_expectPaddingsValue[i][j] = paddingsValue[k]; + } +} + +/** + * @tc.name: spacetobatchnd_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: spacetobatchnd_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2}; + + InitTensor(inputsIndex, outputsIndex); + + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: spacetobatchnd_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + 
EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_build_005 + * @tc.desc: Provide empty input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_005, TestSize.Level0) +{ + std::vector inputsIndex = {}; + std::vector outputsIndex = {}; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_build_006 + * @tc.desc: Provide param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2}; + + InitTensor(inputsIndex, outputsIndex); + + SaveBlockShapeTensor(OH_NN_INT32, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_build_007 + * @tc.desc: Provide param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2}; + + InitTensor(inputsIndex, outputsIndex); + + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT32, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_build_008 + * @tc.desc: Provide input dimensions error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_008, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramsIndex = { 2, 3 }; + InitTensor(inputsIndex, outputsIndex, paramsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_build_009 + * @tc.desc: Provide output dimensions error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_009, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramsIndex = { 2, 3 }; + InitTensor(inputsIndex, outputsIndex, paramsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_build_010 + * @tc.desc: Provide empty output to verify the normal 
behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_010, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = {};
+    std::vector<uint32_t> paramsIndex = { 1, 2 };
+
+    InitTensor(inputsIndex, outputsIndex, paramsIndex);
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: spacetobatchnd_build_011
+ * @tc.desc: Provide block shape parameter buffer is nullptr to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_011, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = { 1 };
+    std::vector<int32_t> shapeDim = {2};
+    std::vector<int32_t> paddingsDim = {2, 2};
+
+    InitTensor(inputsIndex, outputsIndex);
+    std::shared_ptr<NNTensor> blockShapeTensor = TransToNNTensor(OH_NN_INT64, shapeDim, nullptr,
+        OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE);
+    blockShapeTensor->SetBuffer(nullptr, 0);
+    m_allTensors.emplace_back(blockShapeTensor);
+
+    SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: spacetobatchnd_build_012
+ * @tc.desc: Provide paddings parameter buffer is nullptr to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_012, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = { 1 };
+    std::vector<int32_t> shapeDim = {2};
+    std::vector<int32_t> paddingsDim = {2, 2};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE);
+
+    std::shared_ptr<NNTensor> blockShapeTensor = TransToNNTensor(OH_NN_INT64, paddingsDim, nullptr,
+        OH_NN_SPACE_TO_BATCH_ND_PADDINGS);
+    blockShapeTensor->SetBuffer(nullptr, 0);
+    m_allTensors.emplace_back(blockShapeTensor);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: spacetobatchnd_build_013
+ * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_013, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = { 1 };
+    std::vector<int32_t> shapeDim = {2};
+    std::vector<int32_t> paddingsDim = {2, 2};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE);
+    SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SCALE_AXIS);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: spacetobatchnd_build_014
+ * @tc.desc: Provide block shape parameter dimension error to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_014, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = { 1 };
+    std::vector<int32_t> shapeDim = {2, 3};
+    std::vector<int32_t> paddingsDim = {2, 2};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE);
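+    // shapeDim {2, 3} above intentionally differs from the 1-D {2} block shape used by the passing
+    // cases, so Build is expected to reject it with OH_NN_INVALID_PARAMETER.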
+ SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_build_015 + * @tc.desc: Provide paddings parameter dimension error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_015, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_build_016 + * @tc.desc: Provide paddings parameter dimension error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_build_016, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 3}; + + InitTensor(inputsIndex, outputsIndex); + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + + const int paddingsLen = 6; + std::shared_ptr paddingsTensor = TransToNNTensor(OH_NN_INT64, paddingsDim, nullptr, + OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + int64_t* paddingsValue = new (std::nothrow) int64_t[paddingsLen] {0, 0, 0, 0, 0, 0}; + EXPECT_NE(nullptr, paddingsValue); + paddingsTensor->SetBuffer(paddingsValue, sizeof(int64_t) * paddingsLen); + m_allTensors.emplace_back(paddingsTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: spacetobatchnd_getprimitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_getprimitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: spacetobatchnd_getprimitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(SpaceToBatchNDBuilderTest, spacetobatchnd_getprimitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector shapeDim = {2}; + std::vector paddingsDim = {2, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveBlockShapeTensor(OH_NN_INT64, shapeDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE); + SavePaddingsTensor(OH_NN_INT64, paddingsDim, nullptr, OH_NN_SPACE_TO_BATCH_ND_PADDINGS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(primitive, expectPrimitive); + + auto returnValue = mindspore::lite::MindIR_SpaceToBatchND_GetPaddings(primitive.get()); + auto returnValueSize = returnValue.size(); + for 
(size_t i = 0; i < returnValueSize; ++i) { + auto k = returnValue[i].size(); + for (size_t j = 0; j < k; ++j) { + EXPECT_EQ(returnValue[i][j], m_expectPaddingsValue[i][j]); + } + } + + auto returnBlockShape = mindspore::lite::MindIR_SpaceToBatchND_GetBlockShape(primitive.get()); + auto returnBlockShapeSize = returnBlockShape.size(); + for (size_t i = 0; i < returnBlockShapeSize; ++i) { + EXPECT_EQ(returnBlockShape[i], m_expectBlockShapeValue[i]); + } +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/split_builder_test.cpp b/test/unittest/ops/split_builder_test.cpp new file mode 100644 index 0000000..d657265 --- /dev/null +++ b/test/unittest/ops/split_builder_test.cpp @@ -0,0 +1,435 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/split_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SplitBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveOutputNumTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveSizeSplitsTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + SplitBuilder m_builder; + int64_t m_expectOutputNum {0}; + int64_t m_expectAxis {0}; + std::vector m_expectSizeSplitsValue; +}; + +void SplitBuilderTest::InitTensor(const std::vector& inputsIndex, const std::vector& outputsIndex) +{ + std::vector paramsIndex = { 3, 4, 5 }; + std::vector inputDim = {2, 4}; + std::vector OutputDim = {1, 4, 0, 0}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +void SplitBuilderTest::SaveOutputNumTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr outputNumTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* outputNumValue = new (std::nothrow) int64_t[1]{2}; + EXPECT_NE(nullptr, outputNumValue); + outputNumTensor->SetBuffer(outputNumValue, sizeof(int64_t)); + m_allTensors.emplace_back(outputNumTensor); + m_expectOutputNum = *outputNumValue; +} + +void SplitBuilderTest::SaveSizeSplitsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + const int sizeSplitsLen = 2; + std::shared_ptr sizeSplitsTensor = TransToNNTensor(dataType, dim, quantParam, 
type); + int64_t* sizeSplitsValue = new (std::nothrow) int64_t[sizeSplitsLen] {0, 0}; + EXPECT_NE(nullptr, sizeSplitsValue); + sizeSplitsTensor->SetBuffer(sizeSplitsValue, sizeof(int64_t) * sizeSplitsLen); + m_allTensors.emplace_back(sizeSplitsTensor); + m_expectSizeSplitsValue.assign(sizeSplitsValue, sizeSplitsValue + sizeSplitsLen); +} + +void SplitBuilderTest::SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr axisTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t[1]{0}; + EXPECT_NE(nullptr, axisValue); + axisTensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); + m_expectAxis = *axisValue; +} + +/** + * @tc.name: split_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: split_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: split_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2, 3 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + + 
SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_005, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4, 5 }; + std::vector paramsIndex = { 6, 7, 8 }; + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_006 + * @tc.desc: Provide axis param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_007 + * @tc.desc: Provide axis param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_008 + * @tc.desc: Provide size splits param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_008, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_009 + * @tc.desc: Provide output num param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_009, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, 
OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_010 + * @tc.desc: Provide axis param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_010, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_011 + * @tc.desc: Provide axis parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_011, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + axisTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(axisTensor); + + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_012 + * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_012, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SCALE_AXIS); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_013 + * @tc.desc: Provide axis parameter not scalar to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_013, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + std::vector axisDim = {1, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, axisDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_014 + * @tc.desc: Provide output parameter not scalar 
to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_014, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + std::vector outputNumDim = {1, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, outputNumDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_build_015 + * @tc.desc: Provide empty output and param to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_build_015, TestSize.Level0) +{ + std::vector inputsIndex = { 1 }; + std::vector outputsIndex = {}; + std::vector paramsIndex = {}; + std::vector inputDim = {2, 4}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: split_getprimitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_getprimitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: split_getprimitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(SplitBuilderTest, split_getprimitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_AXIS); + SaveOutputNumTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_OUTPUT_NUM); + SaveSizeSplitsTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SPLIT_SIZE_SPLITS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_Split_GetSizeSplits(primitive.get()); + auto returnValueSize = returnValue.size(); + for (size_t i = 0; i < returnValueSize; ++i) { + EXPECT_EQ(returnValue[i], m_expectSizeSplitsValue[i]); + } + + auto returnOutputNum = mindspore::lite::MindIR_Split_GetOutputNum(primitive.get()); + EXPECT_EQ(returnOutputNum, m_expectOutputNum); + + auto returnAxis = mindspore::lite::MindIR_Split_GetAxis(primitive.get()); + EXPECT_EQ(returnAxis, m_expectAxis); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/sqrt_builder_test.cpp b/test/unittest/ops/sqrt_builder_test.cpp new file mode 100644 index 0000000..93a1367 --- /dev/null +++ b/test/unittest/ops/sqrt_builder_test.cpp @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/sqrt_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SqrtBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void CheckResult(); + +protected: + SqrtBuilder m_builder; +}; + +void SqrtBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector inputDim = {3}; + std::vector OutputDim = {3}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +void SqrtBuilderTest::CheckResult() +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(primitive, expectPrimitive); +} + +/** + * @tc.name: sqrt_build_001 + * @tc.desc: Provide normal input, output to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqrtBuilderTest, sqrt_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector inputDim = {3}; + std::vector OutputDim = {3}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: sqrt_build_002 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqrtBuilderTest, sqrt_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector inputDim = {3}; + std::vector OutputDim = {3}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: sqrt_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqrtBuilderTest, sqrt_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector inputDim = {3}; + std::vector OutputDim = {3}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sqrt_build_004 + * @tc.desc: 
Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqrtBuilderTest, sqrt_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector inputDim = {3}; + std::vector OutputDim = {3}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sqrt_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqrtBuilderTest, sqrt_build_005, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramsIndex = {}; + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sqrt_build_006 + * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqrtBuilderTest, sqrt_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = {}; + std::vector inputDim = {3}; + std::vector OutputDim = {3}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sqrt_build_007 + * @tc.desc: Provide a param to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqrtBuilderTest, sqrt_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramsIndex = { 4 }; + + m_paramsIndex = paramsIndex; + InitTensor(inputsIndex, outputsIndex); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sqrt_getprimitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(SqrtBuilderTest, sqrt_getprimitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: sqrt_getprimitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(SqrtBuilderTest, sqrt_getprimitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + InitTensor(inputsIndex, outputsIndex); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + CheckResult(); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/squared_difference_builder_test.cpp b/test/unittest/ops/squared_difference_builder_test.cpp new file mode 100644 index 0000000..c203592 --- /dev/null +++ b/test/unittest/ops/squared_difference_builder_test.cpp @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/squared_difference_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SquaredDifferenceBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void CheckResult(); + +protected: + SquaredDifferenceBuilder m_builder; +}; + +void SquaredDifferenceBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector inputDim = {3}; + std::vector OutputDim = {3}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +void SquaredDifferenceBuilderTest::CheckResult() +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(primitive, expectPrimitive); +} + +/** + * @tc.name: squareddifference_build_001 + * @tc.desc: Provide normal input, output to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector inputDim = {3}; + std::vector OutputDim = {3}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: squareddifference_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector inputDim = {3}; + std::vector OutputDim = {3}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: squareddifference_build_003 + * @tc.desc: rovide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3 }; + std::vector inputDim = {3}; + std::vector OutputDim = {3}; + + 
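+    // Three inputs are registered here, one more than the two operands SquaredDifference takes,
+    // so Build is expected to fail with OH_NN_INVALID_PARAMETER.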
SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr);
+    SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr);
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: squareddifference_build_004
+ * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_build_004, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2, 3 };
+    std::vector<int32_t> inputDim = {3};
+    std::vector<int32_t> OutputDim = {3};
+
+    SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr);
+    SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr);
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: squareddifference_build_005
+ * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_build_005, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2 };
+    std::vector<uint32_t> paramsIndex = {};
+    OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: squareddifference_build_006
+ * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_build_006, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2 };
+    std::vector<int32_t> inputDim = {3};
+
+    SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: squareddifference_build_007
+ * @tc.desc: Provide a param to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_build_007, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2 };
+    std::vector<uint32_t> paramsIndex = { 4 };
+
+    m_paramsIndex = paramsIndex;
+    InitTensor(inputsIndex, outputsIndex);
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: squareddifference_getprimitive_001
+ * @tc.desc: Verify the GetPrimitive function return nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_getprimitive_001, TestSize.Level0)
+{
+    auto primitive = m_builder.GetPrimitive();
+    LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive);
+    EXPECT_EQ(primitive, expectPrimitive);
+}
+
+/**
+ * @tc.name: squareddifference_getprimitive_002
+ * @tc.desc: Verify the normal return behavior of the getprimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquaredDifferenceBuilderTest, squareddifference_getprimitive_002, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2 };
+    InitTensor(inputsIndex, outputsIndex);
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    CheckResult();
+}
+} // namespace
UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/squeeze_builder_test.cpp b/test/unittest/ops/squeeze_builder_test.cpp new file mode 100644 index 0000000..2e39915 --- /dev/null +++ b/test/unittest/ops/squeeze_builder_test.cpp @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/squeeze_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SqueezeBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + SqueezeBuilder m_builder; + std::vector m_expectAxisValue; +}; + +void SqueezeBuilderTest::SaveAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr axisTensor =TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t[1]{2}; + EXPECT_NE(nullptr, axisValue); + axisTensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); + m_expectAxisValue.emplace_back(*axisValue); +} + +void SqueezeBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector paramsIndex = { 2 }; + std::vector inputDim = {3, 2, 1}; + std::vector OutputDim = {3, 2}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +/** + * @tc.name: squeeze_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SQUEEZE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: squeeze_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SQUEEZE_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, 
m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: squeeze_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SQUEEZE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: squeeze_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SQUEEZE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: squeeze_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_005, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramsIndex = { 3 }; + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: squeeze_build_006 + * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = {}; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SQUEEZE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: squeeze_build_007 + * @tc.desc: Provide param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_SQUEEZE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: squeeze_build_008 + * @tc.desc: Provide axis parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_008, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + + std::shared_ptr axisTensor =TransToNNTensor(OH_NN_INT64, paramDim, 
nullptr, OH_NN_SQUEEZE_AXIS); + axisTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: squeeze_build_009 + * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_build_009, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SCALE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: squeeze_getprimitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_getprimitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: squeeze_getprimitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(SqueezeBuilderTest, squeeze_getprimitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SQUEEZE_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(primitive, expectPrimitive); + + auto returnValue = mindspore::lite::MindIR_Squeeze_GetAxis(primitive.get()); + auto returnValueSize = returnValue.size(); + for (size_t i = 0; i < returnValueSize; ++i) { + EXPECT_EQ(returnValue[i], m_expectAxisValue[i]); + } +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/stack_builder_test.cpp b/test/unittest/ops/stack_builder_test.cpp new file mode 100644 index 0000000..7dfc67e --- /dev/null +++ b/test/unittest/ops/stack_builder_test.cpp @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/stack_builder.h" + +#include <gtest/gtest.h> +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class StackBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector<uint32_t>& inputsIndex, + const std::vector<uint32_t>& outputsIndex) override; + void SaveAxisTensor(OH_NN_DataType dataType, const std::vector<int32_t> &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + StackBuilder m_builder; + int64_t m_expectAxisValue {0}; +}; + +void StackBuilderTest::InitTensor(const std::vector<uint32_t>& inputsIndex, + const std::vector<uint32_t>& outputsIndex) +{ + std::vector<uint32_t> paramsIndex = { 3 }; + std::vector<int32_t> inputDim = {2}; + std::vector<int32_t> OutputDim = {2, 2}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +void StackBuilderTest::SaveAxisTensor(OH_NN_DataType dataType, const std::vector<int32_t> &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr<NNTensor> axisTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* axisValue = new (std::nothrow) int64_t[1]{1}; + EXPECT_NE(nullptr, axisValue); + axisTensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); + m_expectAxisValue = *axisValue; +} + +/** + * @tc.name: stack_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_001, TestSize.Level0) +{ + std::vector<uint32_t> inputsIndex = { 0, 1 }; + std::vector<uint32_t> outputsIndex = { 2 }; + std::vector<int32_t> paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: stack_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_002, TestSize.Level0) +{ + std::vector<uint32_t> inputsIndex = { 0, 1 }; + std::vector<uint32_t> outputsIndex = { 2 }; + std::vector<int32_t> paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: stack_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_003, TestSize.Level0) +{ + std::vector<uint32_t> inputsIndex = { 0, 1, 2 }; + std::vector<uint32_t> outputsIndex = { 3 }; + std::vector<int32_t> paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */
+HWTEST_F(StackBuilderTest, stack_build_004, TestSize.Level0) +{ + std::vector<uint32_t> inputsIndex = { 0, 1 }; + std::vector<uint32_t> outputsIndex = { 2, 3 }; + std::vector<int32_t> paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_005, TestSize.Level0) +{ + std::vector<uint32_t> inputsIndex = { 0, 1 }; + std::vector<uint32_t> outputsIndex = {}; + std::vector<uint32_t> paramsIndex = {}; + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_build_006 + * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_006, TestSize.Level0) +{ + std::vector<uint32_t> inputsIndex = { 0, 1 }; + std::vector<uint32_t> outputsIndex = {}; + std::vector<int32_t> paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_build_007 + * @tc.desc: Provide param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_007, TestSize.Level0) +{ + std::vector<uint32_t> inputsIndex = { 0, 1 }; + std::vector<uint32_t> outputsIndex = { 2 }; + std::vector<int32_t> paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_STACK_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_build_008 + * @tc.desc: Provide one less than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_008, TestSize.Level0) +{ + std::vector<uint32_t> inputsIndex = { 0 }; + std::vector<uint32_t> outputsIndex = { 1 }; + std::vector<int32_t> paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_build_009 + * @tc.desc: Provide axis parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_009, TestSize.Level0) +{ + std::vector<uint32_t> inputsIndex = { 0, 1 }; + std::vector<uint32_t> outputsIndex = { 2 }; + std::vector<int32_t> paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + + std::shared_ptr<NNTensor> axisTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + axisTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_build_010 + * @tc.desc: Provide an axis that is not a scalar to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest,
stack_build_010, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {1, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_build_011 + * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_build_011, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SCALE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stack_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: stack_get_primitive_002 + * @tc.desc: Verify the normal return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(StackBuilderTest, stack_get_primitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STACK_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(primitive, expectPrimitive); + + auto returnValue = mindspore::lite::MindIR_Stack_GetAxis(primitive.get()); + EXPECT_EQ(returnValue, m_expectAxisValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/ops/strided_slice_builder_test.cpp b/test/unittest/ops/strided_slice_builder_test.cpp new file mode 100644 index 0000000..3014b75 --- /dev/null +++ b/test/unittest/ops/strided_slice_builder_test.cpp @@ -0,0 +1,557 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/strided_slice_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class StridedSliceBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void SaveBeginMaskTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveEndMaskTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveEllipsisMaskTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveNewAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveShrinkAxisTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void InitParams(); + +protected: + StridedSliceBuilder m_builder; + int64_t m_expectBeginMaskValue {0}; + int64_t m_expectEndMaskValue {0}; + int64_t m_expectEllipsisMaskValue {0}; + int64_t m_expectNewAxisMaskValue {0}; + int64_t m_expectShrinkAxisMaskValue {0}; +}; + +void StridedSliceBuilderTest::SaveBeginMaskTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr beginMaskTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* beginMaskValue = new (std::nothrow) int64_t[1]{0}; + EXPECT_NE(nullptr, beginMaskValue); + beginMaskTensor->SetBuffer(beginMaskValue, sizeof(int64_t)); + m_allTensors.emplace_back(beginMaskTensor); + m_expectBeginMaskValue = *beginMaskValue; +} + +void StridedSliceBuilderTest::SaveEndMaskTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr endMaskTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* endMaskValue = new (std::nothrow) int64_t[1]{0}; + EXPECT_NE(nullptr, endMaskValue); + endMaskTensor->SetBuffer(endMaskValue, sizeof(int64_t)); + m_allTensors.emplace_back(endMaskTensor); + m_expectEndMaskValue = *endMaskValue; +} + +void StridedSliceBuilderTest::SaveEllipsisMaskTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr ellipsisMaskTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* ellipsisMaskValue = new (std::nothrow) int64_t[1]{0}; + EXPECT_NE(nullptr, ellipsisMaskValue); + ellipsisMaskTensor->SetBuffer(ellipsisMaskValue, sizeof(int64_t)); + m_allTensors.emplace_back(ellipsisMaskTensor); + m_expectEllipsisMaskValue = *ellipsisMaskValue; +} + +void StridedSliceBuilderTest::SaveNewAxisTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr axisTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* newAxisMaskValue = new (std::nothrow) int64_t[1]{0}; + EXPECT_NE(nullptr, newAxisMaskValue); + axisTensor->SetBuffer(newAxisMaskValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); + m_expectNewAxisMaskValue = *newAxisMaskValue; +} + +void StridedSliceBuilderTest::SaveShrinkAxisTensor(OH_NN_DataType 
dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr shrinkAxisMaskTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* shrinkAxisMaskValue = new (std::nothrow) int64_t[1]{0}; + EXPECT_NE(nullptr, shrinkAxisMaskValue); + shrinkAxisMaskTensor->SetBuffer(shrinkAxisMaskValue, sizeof(int64_t)); + m_allTensors.emplace_back(shrinkAxisMaskTensor); + m_expectShrinkAxisMaskValue = *shrinkAxisMaskValue; +} + +void StridedSliceBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector paramsIndex = { 5, 6, 7, 8, 9 }; + std::vector inputDim = {3, 2, 3}; + std::vector OutputDim = {1, 2, 2}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +void StridedSliceBuilderTest::InitParams() +{ + std::vector paramDim = {}; + SaveBeginMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_BEGIN_MASK); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); +} + +/** + * @tc.name: stridedslice_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + + InitTensor(inputsIndex, outputsIndex); + InitParams(); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: stridedslice_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + + InitTensor(inputsIndex, outputsIndex); + InitParams(); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: stridedslice_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + std::vector outputsIndex = { 10 }; + std::vector paramsIndex = { 11, 12, 13, 14, 15 }; + std::vector inputDim = {3, 2, 3}; + std::vector OutputDim = {1, 2, 2}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + InitParams(); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: 
FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4, 5 }; + std::vector paramsIndex = { 6, 7, 8, 9, 10 }; + std::vector inputDim = {3, 2, 3}; + std::vector OutputDim = {1, 2, 2}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + InitParams(); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_005, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3, 4, 5, 6, 7, 8 }; + std::vector outputsIndex = { 9 }; + std::vector paramsIndex = { 10, 11, 12, 13, 14 }; + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_006 + * @tc.desc:Provide empty output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = {}; + std::vector paramsIndex = { 4, 5, 6, 7, 8 }; + std::vector inputDim = {3, 2, 3}; + std::vector OutputDim = {1, 2, 2}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + InitParams(); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_007 + * @tc.desc: Provide beginmask param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramsIndex = { 5, 6, 7, 8, 9 }; + std::vector inputDim = {3, 2, 3}; + std::vector OutputDim = {1, 2, 2}; + std::vector paramDim = {}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); + SaveBeginMaskTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_STRIDED_SLICE_BEGIN_MASK); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_008 + * @tc.desc: Provide endmask param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_008, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + 
InitTensor(inputsIndex, outputsIndex); + SaveEndMaskTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_009 + * @tc.desc: Provide ellipsismask param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_009, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_010 + * @tc.desc: Provide axis param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_010, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_011 + * @tc.desc: Provide shrinkaxis param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_011, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + + +/** + * @tc.name: stridedslice_build_012 + * @tc.desc: Provide begin mask parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_012, TestSize.Level0) +{ + std::vector 
inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + std::shared_ptr beginMaskTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, + OH_NN_STRIDED_SLICE_BEGIN_MASK); + beginMaskTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(beginMaskTensor); + + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_013 + * @tc.desc: Provide end mask parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_013, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveBeginMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_BEGIN_MASK); + + std::shared_ptr endMaskTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, + OH_NN_STRIDED_SLICE_END_MASK); + endMaskTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(endMaskTensor); + + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_014 + * @tc.desc: Provide ellipsis mask parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_014, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveBeginMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_BEGIN_MASK); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + + std::shared_ptr ellipsisMaskTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, + OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + ellipsisMaskTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(ellipsisMaskTensor); + + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_015 + * @tc.desc: Provide new axis parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_015, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + 
SaveBeginMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_BEGIN_MASK); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, + OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + axisTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(axisTensor); + + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_016 + * @tc.desc: Provide shrink axis parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_016, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveBeginMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_BEGIN_MASK); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + + std::shared_ptr shrinkAxisTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, + OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK); + shrinkAxisTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(shrinkAxisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_build_017 + * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_build_017, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveBeginMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_BEGIN_MASK); + SaveEndMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_END_MASK); + SaveEllipsisMaskTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_ELLIPSIS_MASK); + SaveNewAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_STRIDED_SLICE_NEW_AXIS_MASK); + SaveShrinkAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SCALE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: stridedslice_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: stridedslice_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(StridedSliceBuilderTest, stridedslice_get_primitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + + 
InitTensor(inputsIndex, outputsIndex); + InitParams(); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_NE(primitive, expectPrimitive); + + auto beginMaskReturn = mindspore::lite::MindIR_StridedSlice_GetBeginMask(primitive.get()); + EXPECT_EQ(beginMaskReturn, m_expectBeginMaskValue); + auto endMaskReturn = mindspore::lite::MindIR_StridedSlice_GetEndMask(primitive.get()); + EXPECT_EQ(endMaskReturn, m_expectEndMaskValue); + auto ellipsisMaskReturn = mindspore::lite::MindIR_StridedSlice_GetEllipsisMask(primitive.get()); + EXPECT_EQ(ellipsisMaskReturn, m_expectEllipsisMaskValue); + auto newAxisMaskReturn = mindspore::lite::MindIR_StridedSlice_GetNewAxisMask(primitive.get()); + EXPECT_EQ(newAxisMaskReturn, m_expectNewAxisMaskValue); + auto shrinkAxisMaskReturn = mindspore::lite::MindIR_StridedSlice_GetShrinkAxisMask(primitive.get()); + EXPECT_EQ(shrinkAxisMaskReturn, m_expectShrinkAxisMaskValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/sub_builder_test.cpp b/test/unittest/ops/sub_builder_test.cpp new file mode 100644 index 0000000..ee7f7a8 --- /dev/null +++ b/test/unittest/ops/sub_builder_test.cpp @@ -0,0 +1,324 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/sub_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SubBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void SaveActivateTypeTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + SubBuilder m_builder; + int8_t m_expectActivationTypeValue {0}; +}; + +void SubBuilderTest::SaveActivateTypeTensor(OH_NN_DataType dataType, const std::vector &dim, + const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr activationTypeTensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* activationTypeValue = new (std::nothrow) int8_t[1]{OH_NN_FUSED_RELU6}; + EXPECT_NE(nullptr, activationTypeValue); + activationTypeTensor->SetBuffer(activationTypeValue, sizeof(int8_t)); + m_allTensors.emplace_back(activationTypeTensor); + m_expectActivationTypeValue = mindspore::lite::ACTIVATION_TYPE_RELU6; +} + +void SubBuilderTest::InitTensor(const std::vector& inputsIndex, const std::vector& outputsIndex) +{ + std::vector paramsIndex = { 3 }; + std::vector inputDim = {3}; + std::vector OutputDim = {3}; + + m_paramsIndex = paramsIndex; + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +/** + * @tc.name: sub_build_001 + * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: sub_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: sub_build_003 + * @tc.desc:Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2, 3 }; + std::vector outputsIndex = { 4 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * 
@tc.name: sub_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_005, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3 }; + std::vector paramsIndex = { 4 }; + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_build_006 + * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = {}; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_build_007 + * @tc.desc: Provide param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_build_008 + * @tc.desc:Provide param type error to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_008, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_build_009 + * @tc.desc: Provide activate type parameter buffer is nullptr to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_009, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + + std::shared_ptr activationTypeTensor = TransToNNTensor(OH_NN_INT8, paramDim, nullptr, + OH_NN_SUB_ACTIVATIONTYPE); + activationTypeTensor->SetBuffer(nullptr, 0); + m_allTensors.emplace_back(activationTypeTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, 
m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_build_010 + * @tc.desc: Provide parameter is not scaler to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_010, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {1, 2}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_build_011 + * @tc.desc: Provide invalid parameter type to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_011, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_SCALE_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_build_012 + * @tc.desc: Provide invalid param value to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_build_012, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + + std::shared_ptr activationTypeTensor = TransToNNTensor(OH_NN_INT8, paramDim, nullptr, + OH_NN_SUB_ACTIVATIONTYPE); + int8_t* activationTypeValue = new (std::nothrow) int8_t[1]{-1}; + EXPECT_NE(nullptr, activationTypeValue); + activationTypeTensor->SetBuffer(activationTypeValue, sizeof(int8_t)); + m_allTensors.emplace_back(activationTypeTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: sub_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: sub_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(SubBuilderTest, sub_get_primitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + std::vector paramDim = {}; + + InitTensor(inputsIndex, outputsIndex); + SaveActivateTypeTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_SUB_ACTIVATIONTYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_NE(primitive, expectPrimitive); + + auto returnValue = mindspore::lite::MindIR_SubFusion_GetActivationType(primitive.get()); + EXPECT_EQ(returnValue, m_expectActivationTypeValue); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/tanh_builder_test.cpp 
b/test/unittest/ops/tanh_builder_test.cpp new file mode 100644 index 0000000..275ce20 --- /dev/null +++ b/test/unittest/ops/tanh_builder_test.cpp @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/tanh_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class TanhBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + +protected: + TanhBuilder m_builder; +}; + +void TanhBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector inputDim = {1, 5, 1, 1}; + std::vector OutputDim = {1, 5, 1, 1}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +/** + * @tc.name: tanh_build_001 + * @tc.desc: Provide normal input, output to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: tanh_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + + InitTensor(inputsIndex, outputsIndex); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: tanh_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: tanh_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1, 2 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = 
m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: tanh_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_build_005, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramsIndex = {}; + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: tanh_build_006 + * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_build_006, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector inputDim = {1, 5, 1, 1}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: tanh_build_007 + * @tc.desc: Provide a param to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_build_007, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + std::vector paramsIndex = { 4 }; + + m_paramsIndex = paramsIndex; + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: tanh_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: tanh_get_primitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(TanhBuilderTest, tanh_get_primitive_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0 }; + std::vector outputsIndex = { 1 }; + + InitTensor(inputsIndex, outputsIndex); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_NE(primitive, expectPrimitive); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/tile_builder_test.cpp b/test/unittest/ops/tile_builder_test.cpp new file mode 100644 index 0000000..f6b9808 --- /dev/null +++ b/test/unittest/ops/tile_builder_test.cpp @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/tile_builder.h" + +#include +#include "frameworks/native/nn_tensor.h" +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class TileBuilderTest : public OpsTest { +protected: + void InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) override; + void CheckResult(); + +protected: + TileBuilder m_builder; +}; + +void TileBuilderTest::InitTensor(const std::vector& inputsIndex, + const std::vector& outputsIndex) +{ + std::vector inputDim = {2, 2}; + std::vector OutputDim = {4, 4}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr); +} + +void TileBuilderTest::CheckResult() +{ + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_NE(primitive, expectPrimitive); +} + +/** + * @tc.name: tile_build_001 + * @tc.desc: Provide normal input, output to verify the normal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TileBuilderTest, tile_build_001, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: tile_build_002 + * @tc.desc: Call Build func twice to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TileBuilderTest, tile_build_002, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2 }; + + InitTensor(inputsIndex, outputsIndex); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: tile_build_003 + * @tc.desc: Provide one more than normal input to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TileBuilderTest, tile_build_003, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1, 2 }; + std::vector outputsIndex = { 3 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: tile_build_004 + * @tc.desc: Provide one more than normal output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TileBuilderTest, tile_build_004, TestSize.Level0) +{ + std::vector inputsIndex = { 0, 1 }; + std::vector outputsIndex = { 2, 3 }; + + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: tile_build_005 + * @tc.desc: Provide empty input, output, and parameters to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TileBuilderTest, tile_build_005, 
TestSize.Level0) +{ + std::vector<uint32_t> inputsIndex = { 0, 1 }; + std::vector<uint32_t> outputsIndex = { 2 }; + std::vector<uint32_t> paramsIndex = {}; + + OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: tile_build_006 + * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TileBuilderTest, tile_build_006, TestSize.Level0) +{ + std::vector<uint32_t> inputsIndex = { 0, 1 }; + std::vector<uint32_t> outputsIndex = { 2 }; + std::vector<int32_t> inputDim = {2, 2}; + + SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: tile_build_007 + * @tc.desc: Provide a param to verify the abnormal behavior of the Build function + * @tc.type: FUNC + */ +HWTEST_F(TileBuilderTest, tile_build_007, TestSize.Level0) +{ + std::vector<uint32_t> inputsIndex = { 0, 1 }; + std::vector<uint32_t> outputsIndex = { 2 }; + std::vector<uint32_t> paramsIndex = { 4 }; + + m_paramsIndex = paramsIndex; + InitTensor(inputsIndex, outputsIndex); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: tile_get_primitive_001 + * @tc.desc: Verify the GetPrimitive function return nullptr + * @tc.type: FUNC + */ +HWTEST_F(TileBuilderTest, tile_get_primitive_001, TestSize.Level0) +{ + LiteGraphTensorPtr primitive = m_builder.GetPrimitive(); + LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive }; + EXPECT_EQ(primitive, expectPrimitive); +} + +/** + * @tc.name: tile_getprimitive_002 + * @tc.desc: Verify the normal params return behavior of the getprimitive function + * @tc.type: FUNC + */ +HWTEST_F(TileBuilderTest, tile_getprimitive_002, TestSize.Level0) +{ + std::vector<uint32_t> inputsIndex = { 0, 1 }; + std::vector<uint32_t> outputsIndex = { 2 }; + + InitTensor(inputsIndex, outputsIndex); + + CheckResult(); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/ops/topk_builder_test.cpp b/test/unittest/ops/topk_builder_test.cpp new file mode 100644 index 0000000..8237e92 --- /dev/null +++ b/test/unittest/ops/topk_builder_test.cpp @@ -0,0 +1,261 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */
+
+#include "frameworks/native/ops/top_k_builder.h"
+
+#include <gtest/gtest.h>
+#include "frameworks/native/nn_tensor.h"
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class TopKBuilderTest : public OpsTest {
+protected:
+    void InitTensor(const std::vector<uint32_t>& inputsIndex,
+        const std::vector<uint32_t>& outputsIndex) override;
+    void SaveSortedTensor(OH_NN_DataType dataType, const std::vector<int32_t> &dim,
+        const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+
+protected:
+    TopKBuilder m_builder;
+    bool m_topkValue {false};
+};
+
+void TopKBuilderTest::InitTensor(const std::vector<uint32_t>& inputsIndex, const std::vector<uint32_t>& outputsIndex)
+{
+    std::vector<uint32_t> paramsIndex = { 4 };
+    std::vector<int32_t> inputDim = {9};
+    std::vector<int32_t> OutputDim = {3};
+
+    m_paramsIndex = paramsIndex;
+    SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr);
+    SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr);
+}
+
+void TopKBuilderTest::SaveSortedTensor(OH_NN_DataType dataType, const std::vector<int32_t> &dim,
+    const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    std::shared_ptr<NNTensor> topkTensor = TransToNNTensor(dataType, dim, quantParam, type);
+    bool* topkValue = new (std::nothrow) bool[1]{true};
+    EXPECT_NE(nullptr, topkValue);
+    topkTensor->SetBuffer(topkValue, sizeof(bool));
+    m_allTensors.emplace_back(topkTensor);
+    m_topkValue = *topkValue;
+}
+
+/**
+ * @tc.name: topk_build_001
+ * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TopKBuilderTest, topk_build_001, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2, 3 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/**
+ * @tc.name: topk_build_002
+ * @tc.desc: Call the Build function twice to verify its abnormal behavior
+ * @tc.type: FUNC
+ */
+HWTEST_F(TopKBuilderTest, topk_build_002, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2, 3 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: topk_build_003
+ * @tc.desc: Provide one more input than expected to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TopKBuilderTest, topk_build_003, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1, 2, 3 };
+    std::vector<uint32_t> outputsIndex = { 4, 5 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: topk_builder_004
+ * @tc.desc: Provide one more output than expected to verify the abnormal behavior of the Build function
+ */
+HWTEST_F(TopKBuilderTest, topk_builder_004, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2, 3, 4 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: topk_build_005
+ * @tc.desc: Call Build without saving any tensors to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TopKBuilderTest, topk_build_005, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1, 2 };
+    std::vector<uint32_t> outputsIndex = { 3, 4 };
+    std::vector<uint32_t> paramsIndex = { 5 };
+
+    OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: topk_build_006
+ * @tc.desc: Provide a sorted parameter with the wrong data type (OH_NN_INT8) to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TopKBuilderTest, topk_build_006, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2, 3 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveSortedTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_TOP_K_SORTED);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: topk_build_007
+ * @tc.desc: Provide a sorted parameter with the wrong data type (OH_NN_INT32) to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TopKBuilderTest, topk_build_007, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2, 3 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveSortedTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_TOP_K_SORTED);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: topk_build_008
+ * @tc.desc: Provide a sorted parameter whose buffer is nullptr to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TopKBuilderTest, topk_build_008, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2, 3 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+
+    std::shared_ptr<NNTensor> topkTensor = TransToNNTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED);
+    topkTensor->SetBuffer(nullptr, 0);
+    m_allTensors.emplace_back(topkTensor);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: topk_build_009
+ * @tc.desc: Provide an invalid parameter type to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TopKBuilderTest, topk_build_009, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2, 3 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_SCALE_AXIS);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: topk_get_primitive_001
+ * @tc.desc: Verify that the GetPrimitive function returns nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(TopKBuilderTest, topk_get_primitive_001, TestSize.Level0)
+{
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive };
+    EXPECT_EQ(primitive, expectPrimitive);
+}
+
+/**
+ * @tc.name: topk_get_primitive_002
+ * @tc.desc: Verify the normal parameter return behavior of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TopKBuilderTest, topk_get_primitive_002, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2, 3 };
+    std::vector<int32_t> paramDim = {};
+    InitTensor(inputsIndex, outputsIndex);
+    SaveSortedTensor(OH_NN_BOOL, paramDim, nullptr, OH_NN_TOP_K_SORTED);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive };
+    EXPECT_NE(primitive, expectPrimitive);
+
+    auto sortedReturn = mindspore::lite::MindIR_TopKFusion_GetSorted(primitive.get());
+    EXPECT_EQ(sortedReturn, m_topkValue);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/test/unittest/ops/transpose_builder_test.cpp b/test/unittest/ops/transpose_builder_test.cpp
new file mode 100644
index 0000000..a0a5095
--- /dev/null
+++ b/test/unittest/ops/transpose_builder_test.cpp
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "frameworks/native/ops/transpose_builder.h"
+
+#include <gtest/gtest.h>
+#include "frameworks/native/nn_tensor.h"
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class TransposeBuilderTest : public OpsTest {
+protected:
+    void InitTensor(const std::vector<uint32_t>& inputsIndex,
+        const std::vector<uint32_t>& outputsIndex) override;
+
+protected:
+    TransposeBuilder m_builder;
+};
+
+void TransposeBuilderTest::InitTensor(const std::vector<uint32_t>& inputsIndex,
+    const std::vector<uint32_t>& outputsIndex)
+{
+    std::vector<int32_t> inputDim = {2, 3};
+    std::vector<int32_t> OutputDim = {3, 2};
+
+    SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr);
+    SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr);
+}
+
+/**
+ * @tc.name: transpose_build_001
+ * @tc.desc: Provide normal inputs and outputs to verify the normal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransposeBuilderTest, transpose_build_001, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2 };
+
+    InitTensor(inputsIndex, outputsIndex);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/**
+ * @tc.name: transpose_build_002
+ * @tc.desc: Call the Build function twice to verify its abnormal behavior
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransposeBuilderTest, transpose_build_002, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2 };
+
+    InitTensor(inputsIndex, outputsIndex);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: transpose_build_003
+ * @tc.desc: Provide one more input than expected to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransposeBuilderTest, transpose_build_003, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1, 2 };
+    std::vector<uint32_t> outputsIndex = { 3 };
+
+    InitTensor(inputsIndex, outputsIndex);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: transpose_build_004
+ * @tc.desc: Provide one more output than expected to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransposeBuilderTest, transpose_build_004, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2, 3 };
+
+    InitTensor(inputsIndex, outputsIndex);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: transpose_build_005
+ * @tc.desc: Call Build without saving any tensors to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransposeBuilderTest, transpose_build_005, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2 };
+    std::vector<uint32_t> paramsIndex = {};
+
+    OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: transpose_build_006
+ * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransposeBuilderTest, transpose_build_006, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: transpose_build_007
+ * @tc.desc: Provide an unexpected parameter to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransposeBuilderTest, transpose_build_007, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2 };
+    std::vector<uint32_t> paramsIndex = { 4 };
+
+    m_paramsIndex = paramsIndex;
+    InitTensor(inputsIndex, outputsIndex);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: transpose_get_primitive_001
+ * @tc.desc: Verify that the GetPrimitive function returns nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransposeBuilderTest, transpose_get_primitive_001, TestSize.Level0)
+{
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive };
+    EXPECT_EQ(primitive, expectPrimitive);
+}
+
+/**
+ * @tc.name: transpose_getprimitive_002
+ * @tc.desc: Verify the normal return behavior of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(TransposeBuilderTest, transpose_getprimitive_002, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1 };
+    std::vector<uint32_t> outputsIndex = { 2 };
+
+    InitTensor(inputsIndex, outputsIndex);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive };
+    EXPECT_NE(primitive, expectPrimitive);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/test/unittest/ops/unsqueeze_builder_test.cpp b/test/unittest/ops/unsqueeze_builder_test.cpp
new file mode 100644
index 0000000..512b4d2
--- /dev/null
+++ b/test/unittest/ops/unsqueeze_builder_test.cpp
@@ -0,0 +1,301 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "frameworks/native/ops/unsqueeze_builder.h"
+
+#include <gtest/gtest.h>
+#include "frameworks/native/nn_tensor.h"
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class UnsqueezeBuilderTest : public OpsTest {
+protected:
+    void InitTensor(const std::vector<uint32_t>& inputsIndex,
+        const std::vector<uint32_t>& outputsIndex) override;
+    void SaveAxisTensor(OH_NN_DataType dataType, const std::vector<int32_t> &dim,
+        const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+
+protected:
+    UnsqueezeBuilder m_builder;
+    std::vector<int64_t> m_expectAxisValue;
+};
+
+void UnsqueezeBuilderTest::SaveAxisTensor(OH_NN_DataType dataType, const std::vector<int32_t> &dim,
+    const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    std::shared_ptr<NNTensor> axisTensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int64_t* axisValue = new (std::nothrow) int64_t[1]{1};
+    axisTensor->SetBuffer(axisValue, sizeof(int64_t));
+    m_allTensors.emplace_back(axisTensor);
+    m_expectAxisValue.emplace_back(*axisValue);
+}
+
+void UnsqueezeBuilderTest::InitTensor(const std::vector<uint32_t>& inputsIndex,
+    const std::vector<uint32_t>& outputsIndex)
+{
+    std::vector<uint32_t> paramsIndex = { 2 };
+    std::vector<int32_t> inputDim = {1, 5, 1};
+    std::vector<int32_t> OutputDim = {1, 1, 5, 1};
+
+    m_paramsIndex = paramsIndex;
+    SaveInputTensor(inputsIndex, OH_NN_FLOAT32, inputDim, nullptr);
+    SaveOutputTensor(outputsIndex, OH_NN_FLOAT32, OutputDim, nullptr);
+}
+
+/**
+ * @tc.name: unsqueeze_build_001
+ * @tc.desc: Provide normal input, output, and parameters to verify the normal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_001, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = { 1 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/**
+ * @tc.name: unsqueeze_build_002
+ * @tc.desc: Call the Build function twice to verify its abnormal behavior
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_002, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = { 1 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: unsqueeze_build_003
+ * @tc.desc: Provide one more input than expected to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_003, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0, 1, 2 };
+    std::vector<uint32_t> outputsIndex = { 3 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: unsqueeze_build_004
+ * @tc.desc: Provide one more output than expected to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_004, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = { 1, 2 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: unsqueeze_build_005
+ * @tc.desc: Call Build without saving any tensors to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_005, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = { 1 };
+    std::vector<uint32_t> paramsIndex = { 2 };
+
+    OH_NN_ReturnCode ret = m_builder.Build(paramsIndex, inputsIndex, outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: unsqueeze_build_006
+ * @tc.desc: Provide empty output to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_006, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = {};
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: unsqueeze_build_007
+ * @tc.desc: Provide an axis parameter with the wrong data type (OH_NN_INT8) to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_007, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = { 1 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveAxisTensor(OH_NN_INT8, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: unsqueeze_build_008
+ * @tc.desc: Provide an axis parameter with the wrong data type (OH_NN_INT32) to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_008, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = { 1 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveAxisTensor(OH_NN_INT32, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: unsqueeze_build_009
+ * @tc.desc: Provide an axis parameter whose buffer is nullptr to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_009, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = { 1 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+
+    std::shared_ptr<NNTensor> axisTensor = TransToNNTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS);
+    axisTensor->SetBuffer(nullptr, 0);
+    m_allTensors.emplace_back(axisTensor);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: unsqueeze_build_010
+ * @tc.desc: Provide an axis parameter that is not a scalar to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_010, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = { 1 };
+    std::vector<int32_t> paramDim = {1, 2};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: unsqueeze_build_011
+ * @tc.desc: Provide an invalid parameter type to verify the abnormal behavior of the Build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnsqueezeBuilderTest, unsqueeze_build_011, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = { 1 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_SCALE_AXIS);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: unsqueeze_get_primitive_001
+ * @tc.desc: Verify that the GetPrimitive function returns nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnsqueezeBuilderTest, unsqueeze_get_primitive_001, TestSize.Level0)
+{
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive };
+    EXPECT_EQ(primitive, expectPrimitive);
+}
+
+/**
+ * @tc.name: unsqueeze_get_primitive_002
+ * @tc.desc: Verify the normal parameter return behavior of the GetPrimitive function
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnsqueezeBuilderTest, unsqueeze_get_primitive_002, TestSize.Level0)
+{
+    std::vector<uint32_t> inputsIndex = { 0 };
+    std::vector<uint32_t> outputsIndex = { 1 };
+    std::vector<int32_t> paramDim = {};
+
+    InitTensor(inputsIndex, outputsIndex);
+    SaveAxisTensor(OH_NN_INT64, paramDim, nullptr, OH_NN_UNSQUEEZE_AXIS);
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
+    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
+    LiteGraphTensorPtr expectPrimitive = { nullptr, DestroyLiteGraphPrimitive };
+    EXPECT_NE(primitive, expectPrimitive);
+
+    auto axisReturn = mindspore::lite::MindIR_Unsqueeze_GetAxis(primitive.get());
+    auto axisReturnSize = axisReturn.size();
+    for (size_t i = 0; i < axisReturnSize; ++i) {
+        EXPECT_EQ(axisReturn[i], m_expectAxisValue[i]);
+    }
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
--
Gitee